#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.Thrift import TException, TType, TFrozenDict
from thrift.transport.TTransport import TTransportException
from ..compat import binary_to_str, str_to_binary
import six
import sys
from itertools import islice
from six.moves import zip
class TProtocolException(TException):
"""Custom Protocol Exception class"""
UNKNOWN = 0
INVALID_DATA = 1
NEGATIVE_SIZE = 2
SIZE_LIMIT = 3
BAD_VERSION = 4
NOT_IMPLEMENTED = 5
DEPTH_LIMIT = 6
INVALID_PROTOCOL = 7
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TProtocolBase(object):
"""Base class for Thrift protocol driver."""
def __init__(self, trans):
self.trans = trans
self._fast_decode = None
self._fast_encode = None
@staticmethod
def _check_length(limit, length):
if length < 0:
raise TTransportException(TTransportException.NEGATIVE_SIZE,
'Negative length: %d' % length)
if limit is not None and length > limit:
raise TTransportException(TTransportException.SIZE_LIMIT,
'Length exceeded max allowed: %d' % limit)
def writeMessageBegin(self, name, ttype, seqid):
pass
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, ttype, fid):
pass
def writeFieldEnd(self):
pass
def writeFieldStop(self):
pass
def writeMapBegin(self, ktype, vtype, size):
pass
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
pass
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
pass
def writeSetEnd(self):
pass
def writeBool(self, bool_val):
pass
def writeByte(self, byte):
pass
def writeI16(self, i16):
pass
def writeI32(self, i32):
pass
def writeI64(self, i64):
pass
def writeDouble(self, dub):
pass
def writeString(self, str_val):
self.writeBinary(str_to_binary(str_val))
def writeBinary(self, str_val):
pass
def writeUtf8(self, str_val):
self.writeString(str_val.encode('utf8'))
def readMessageBegin(self):
pass
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
pass
def readFieldEnd(self):
pass
def readMapBegin(self):
pass
def readMapEnd(self):
pass
def readListBegin(self):
pass
def readListEnd(self):
pass
def readSetBegin(self):
pass
def readSetEnd(self):
pass
def readBool(self):
pass
def readByte(self):
pass
def readI16(self):
pass
def readI32(self):
pass
def readI64(self):
pass
def readDouble(self):
pass
def readString(self):
return binary_to_str(self.readBinary())
def readBinary(self):
pass
def readUtf8(self):
return self.readString().decode('utf8')
def skip(self, ttype):
if ttype == TType.BOOL:
self.readBool()
elif ttype == TType.BYTE:
self.readByte()
elif ttype == TType.I16:
self.readI16()
elif ttype == TType.I32:
self.readI32()
elif ttype == TType.I64:
self.readI64()
elif ttype == TType.DOUBLE:
self.readDouble()
elif ttype == TType.STRING:
self.readString()
elif ttype == TType.STRUCT:
name = self.readStructBegin()
while True:
(name, ttype, id) = self.readFieldBegin()
if ttype == TType.STOP:
break
self.skip(ttype)
self.readFieldEnd()
self.readStructEnd()
elif ttype == TType.MAP:
(ktype, vtype, size) = self.readMapBegin()
for i in range(size):
self.skip(ktype)
self.skip(vtype)
self.readMapEnd()
elif ttype == TType.SET:
(etype, size) = self.readSetBegin()
for i in range(size):
self.skip(etype)
self.readSetEnd()
elif ttype == TType.LIST:
(etype, size) = self.readListBegin()
for i in range(size):
self.skip(etype)
self.readListEnd()
else:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"invalid TType")
# tuple of: ('reader method' name, 'writer method' name, is_container bool)
_TTYPE_HANDLERS = (
(None, None, False), # 0 TType.STOP
(None, None, False), # 1 TType.VOID # TODO: handle void?
('readBool', 'writeBool', False), # 2 TType.BOOL
('readByte', 'writeByte', False), # 3 TType.BYTE and I08
('readDouble', 'writeDouble', False), # 4 TType.DOUBLE
(None, None, False), # 5 undefined
('readI16', 'writeI16', False), # 6 TType.I16
(None, None, False), # 7 undefined
('readI32', 'writeI32', False), # 8 TType.I32
(None, None, False), # 9 undefined
('readI64', 'writeI64', False), # 10 TType.I64
('readString', 'writeString', False), # 11 TType.STRING and UTF7
('readContainerStruct', 'writeContainerStruct', True), # 12 *.STRUCT
('readContainerMap', 'writeContainerMap', True), # 13 TType.MAP
('readContainerSet', 'writeContainerSet', True), # 14 TType.SET
('readContainerList', 'writeContainerList', True), # 15 TType.LIST
(None, None, False), # 16 TType.UTF8 # TODO: handle utf8 types?
(None, None, False) # 17 TType.UTF16 # TODO: handle utf16 types?
)
def _ttype_handlers(self, ttype, spec):
if spec == 'BINARY':
if ttype != TType.STRING:
raise TProtocolException(type=TProtocolException.INVALID_DATA,
message='Invalid binary field type %d' % ttype)
return ('readBinary', 'writeBinary', False)
if sys.version_info[0] == 2 and spec == 'UTF8':
if ttype != TType.STRING:
raise TProtocolException(type=TProtocolException.INVALID_DATA,
message='Invalid string field type %d' % ttype)
return ('readUtf8', 'writeUtf8', False)
return self._TTYPE_HANDLERS[ttype] if ttype < len(self._TTYPE_HANDLERS) else (None, None, False)
def _read_by_ttype(self, ttype, spec, espec):
reader_name, _, is_container = self._ttype_handlers(ttype, espec)
if reader_name is None:
raise TProtocolException(type=TProtocolException.INVALID_DATA,
message='Invalid type %d' % (ttype))
reader_func = getattr(self, reader_name)
read = (lambda: reader_func(espec)) if is_container else reader_func
while True:
yield read()
def readFieldByTType(self, ttype, spec):
return next(self._read_by_ttype(ttype, spec, spec))
def readContainerList(self, spec):
ttype, tspec, is_immutable = spec
(list_type, list_len) = self.readListBegin()
# TODO: compare types we just decoded with thrift_spec
elems = islice(self._read_by_ttype(ttype, spec, tspec), list_len)
results = (tuple if is_immutable else list)(elems)
self.readListEnd()
return results
def readContainerSet(self, spec):
ttype, tspec, is_immutable = spec
(set_type, set_len) = self.readSetBegin()
# TODO: compare types we just decoded with thrift_spec
elems = islice(self._read_by_ttype(ttype, spec, tspec), set_len)
results = (frozenset if is_immutable else set)(elems)
self.readSetEnd()
return results
def readContainerStruct(self, spec):
(obj_class, obj_spec) = spec
obj = obj_class()
obj.read(self)
return obj
def readContainerMap(self, spec):
ktype, kspec, vtype, vspec, is_immutable = spec
(map_ktype, map_vtype, map_len) = self.readMapBegin()
# TODO: compare types we just decoded with thrift_spec and
# abort/skip if types disagree
keys = self._read_by_ttype(ktype, spec, kspec)
vals = self._read_by_ttype(vtype, spec, vspec)
keyvals = islice(zip(keys, vals), map_len)
results = (TFrozenDict if is_immutable else dict)(keyvals)
self.readMapEnd()
return results
def readStruct(self, obj, thrift_spec, is_immutable=False):
if is_immutable:
fields = {}
self.readStructBegin()
while True:
(fname, ftype, fid) = self.readFieldBegin()
if ftype == TType.STOP:
break
try:
field = thrift_spec[fid]
except IndexError:
self.skip(ftype)
else:
if field is not None and ftype == field[1]:
fname = field[2]
fspec = field[3]
val = self.readFieldByTType(ftype, fspec)
if is_immutable:
fields[fname] = val
else:
setattr(obj, fname, val)
else:
self.skip(ftype)
self.readFieldEnd()
self.readStructEnd()
if is_immutable:
return obj(**fields)
def writeContainerStruct(self, val, spec):
val.write(self)
def writeContainerList(self, val, spec):
ttype, tspec, _ = spec
self.writeListBegin(ttype, len(val))
for _ in self._write_by_ttype(ttype, val, spec, tspec):
pass
self.writeListEnd()
def writeContainerSet(self, val, spec):
ttype, tspec, _ = spec
self.writeSetBegin(ttype, len(val))
for _ in self._write_by_ttype(ttype, val, spec, tspec):
pass
self.writeSetEnd()
def writeContainerMap(self, val, spec):
ktype, kspec, vtype, vspec, _ = spec
self.writeMapBegin(ktype, vtype, len(val))
for _ in zip(self._write_by_ttype(ktype, six.iterkeys(val), spec, kspec),
self._write_by_ttype(vtype, six.itervalues(val), spec, vspec)):
pass
self.writeMapEnd()
def writeStruct(self, obj, thrift_spec):
self.writeStructBegin(obj.__class__.__name__)
for field in thrift_spec:
if field is None:
continue
fname = field[2]
val = getattr(obj, fname)
if val is None:
# skip writing out unset fields
continue
fid = field[0]
ftype = field[1]
fspec = field[3]
self.writeFieldBegin(fname, ftype, fid)
self.writeFieldByTType(ftype, val, fspec)
self.writeFieldEnd()
self.writeFieldStop()
self.writeStructEnd()
def _write_by_ttype(self, ttype, vals, spec, espec):
_, writer_name, is_container = self._ttype_handlers(ttype, espec)
writer_func = getattr(self, writer_name)
write = (lambda v: writer_func(v, espec)) if is_container else writer_func
for v in vals:
yield write(v)
def writeFieldByTType(self, ttype, val, spec):
next(self._write_by_ttype(ttype, [val], spec, spec))
def checkIntegerLimits(i, bits):
if bits == 8 and (i < -128 or i > 127):
raise TProtocolException(TProtocolException.INVALID_DATA,
"i8 requires -128 <= number <= 127")
elif bits == 16 and (i < -32768 or i > 32767):
raise TProtocolException(TProtocolException.INVALID_DATA,
"i16 requires -32768 <= number <= 32767")
elif bits == 32 and (i < -2147483648 or i > 2147483647):
raise TProtocolException(TProtocolException.INVALID_DATA,
"i32 requires -2147483648 <= number <= 2147483647")
elif bits == 64 and (i < -9223372036854775808 or i > 9223372036854775807):
raise TProtocolException(TProtocolException.INVALID_DATA,
"i64 requires -9223372036854775808 <= number <= 9223372036854775807")
class TProtocolFactory(object):
def getProtocol(self, trans):
pass
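# Illustrative sketch (not part of the Thrift library): how generated struct
# code is assumed to drive writeStruct()/readStruct() through a concrete
# protocol. The Point class and its thrift_spec below are hypothetical
# stand-ins for Thrift-compiler output; TBinaryProtocol and TMemoryBuffer are
# the stock implementations shipped alongside this module.
#
#   from thrift.protocol.TBinaryProtocol import TBinaryProtocol
#   from thrift.transport.TTransport import TMemoryBuffer
#
#   class Point(object):
#       thrift_spec = (
#           None,                             # field ids start at 1
#           (1, TType.I32, 'x', None, None),  # (fid, ftype, fname, fspec, default)
#           (2, TType.I32, 'y', None, None),
#       )
#       def __init__(self, x=None, y=None):
#           self.x, self.y = x, y
#       def write(self, oprot):
#           oprot.writeStruct(self, self.thrift_spec)
#       def read(self, iprot):
#           iprot.readStruct(self, self.thrift_spec)
#
#   buf = TMemoryBuffer()
#   proto = TBinaryProtocol(buf)
#   proto.writeStruct(Point(x=1, y=2), Point.thrift_spec)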
|
|
#!/usr/bin/env python
#
# $Id: build.py 9318 2011-06-10 02:37:10Z nathan_george $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
import datetime
import optparse
import os
import shutil
import subprocess
import sys
import time
import traceback
import component
import runnable_assembly
import sandbox
import timeout_monitor
_META_DATA_TXT = 'metadata.txt'
_IF_TOP_DIR = '.if_top'
# I have to import builders two different ways to make the next 10 lines work.
# The first import is necessary because I can't reference builders.__all__
# without it. The second import is what brings the specific builders (ant,
# cmake, etc) into visibility without me having to enumerate all of them on
# separate import statements
from builders import __all__ as all_builders
#from builders import *
# Create a builder for each type of codebase.
_builders = {}
for builder in all_builders:
command = \
'''
from builders.{0} import Builder as {0}_builder
new_builder = {0}_builder()
_builders[new_builder.get_name()] = new_builder
'''
exec(command.format(builder))
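# Illustrative sketch (an assumption, not shipped code): the interface each
# builders.<name> module is expected to expose, inferred from how Builder
# objects are used below in select_builder() and do_build(). Method names
# mirror the calls made in this script; the bodies are hypothetical.
#
#   class Builder(object):
#       def get_name(self):
#           return 'mytool'           # key stored in _builders, matched by -b/--builder
#       def priority(self):
#           return 100                # lower values are preferred by select_builder()
#       def supports(self, sb):
#           return False              # True if this tool can build sandbox sb
#       def get_clean_exclusions(self, sb):
#           return []                 # paths clean() should leave untouched
#       def clean(self, built_root, exclusions):
#           return 0                  # 0 on success, non-zero on failure
#       def build(self, sb, options, targets):
#           return 0                  # 0 on success, non-zero on failure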
def select_builder(sb):
'''
Find a Builder that supports the specified sandbox.
'''
x = _builders.values()[:]
x.sort(cmp=lambda a,b: cmp(a.priority(), b.priority()))
for b in x:
if b.supports(sb):
return b
class _SimulatedBuilder():
def __init__(self, quiet=False):
self.quiet = quiet
def build(self, sb, options, targets):
if not self.quiet:
for t in targets:
print('Building "{0}" target...'.format(t))
print('BUILD SUCCEEDED')
return 0
def get_name(self):
return 'simulated'
class BuilderOptions:
def __init__(self, prompt=False, verbose=False, build_type=None, compiler=None, timeout=None, quick=False):
self.prompt = prompt
self.verbose = verbose
self.build_type = build_type
self.compiler = compiler
self.timeout = timeout
self.quick = quick
def __str__(self):
s = 'Prompt: {0}, Verbose: {1}, BuildType: {2}, Compiler: {3}, Timeout: {4}'.format \
(self.prompt, self.verbose, self.build_type, self.compiler, self.timeout)
return s
def _define_options():
description = "Make specified targets using tools appropriate for codebase."
usage = "Usage: %prog [options] [targets]"
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option('--sandbox', dest="sandbox", help="path to sandbox to build",
metavar="FLDR", default=sandbox.current.get_root())
parser.add_option('--timeout', dest="timeout",
help="override timeout after which build should abort if no new text arrives on stdout",
type='int', metavar='SECS', default=None)
parser.add_option('-d', '--dry-run', dest='dry_run', action='store_true', \
help="simulate build and return success", default=False)
parser.add_option('-v', '--verbose', dest="verbose", action='store_true', \
help="emit detailed messages", default=False)
parser.add_option('-p', '--prompt', dest="prompt", action='store_true', \
help="allow interacting with build tool.", default=False)
parser.add_option('-r', '--assemble-only', dest='assemble_only', action='store_true', \
help="bypass building and just assemble the runnable aspect", default=False)
parser.add_option('-b', '--builder', dest="builder", action="store", \
help="override auto-detected make tool", \
type='string', metavar='|'.join(_builders.keys()), default=None)
parser.add_option('-t', '--build-type', dest="buildtype", action="store", \
help="specify the build type", \
type='string', metavar='Release|Debug', default=None)
parser.add_option('-c', '--compiler', dest="compiler", action="store", \
help="override the default compiler (windows cmake specific)", \
type='string', default=None)
parser.add_option('-a', '--auto', dest="auto", action="store_true", \
help="automatically rebuild whenever code updates", \
default=False)
parser.add_option('--quick', dest="quick", action="store_true",
help="only run test if code aspect present",
default=False)
return parser
def parse_args(argv):
parser = _define_options()
args_options, args = parser.parse_args(argv)
if args_options.dry_run and args_options.auto:
print('The --dry-run and --auto options are mutually exclusive.')
sys.exit(1)
if args_options.prompt and args_options.auto:
print('The --prompt and --auto options are mutually exclusive.')
sys.exit(1)
# The user may not have matched the case of the builder key name.
builder = None
if args_options.builder:
builders = [key for key in _builders.iterkeys() if key.lower() == args_options.builder.lower()]
builder = builders[0] if len(builders) == 1 else None
if not builder:
raise optparse.OptionError('Invalid builder specified {0}'.format(args_options.builder), '--builder')
build_type = None
if args_options.buildtype and args_options.buildtype.capitalize() in ['Release', 'Debug']:
build_type = args_options.buildtype.capitalize()
builder_options = BuilderOptions(prompt=args_options.prompt, verbose=args_options.verbose,
build_type=build_type, compiler=args_options.compiler, \
timeout=args_options.timeout)
return args, args_options, builder, builder_options
def copy_required_built_files(sb):
component_built_dir = os.path.join(sb.get_built_root(), sb.get_top_component())
component_code_dir = os.path.join(sb.get_code_root(), sb.get_top_component())
# Ensure that the component directory exists
if not os.path.isdir(component_built_dir):
os.makedirs(component_built_dir)
code_meta = os.path.join(component_code_dir, _META_DATA_TXT)
built_meta = os.path.join(component_built_dir, _META_DATA_TXT)
if os.path.isfile(code_meta):
# If the destination exists then delete it
if os.path.isfile(built_meta):
os.remove(built_meta)
# Copy from the code aspect to the built aspect
shutil.copy2(code_meta, built_meta)
code_if_top = os.path.join(component_code_dir, _IF_TOP_DIR)
built_if_top = os.path.join(component_built_dir, _IF_TOP_DIR)
if os.path.isdir(code_if_top):
# If the destination exists then delete it
if os.path.isdir(built_if_top):
shutil.rmtree(built_if_top)
# Copy from the code aspect to the built aspect
shutil.copytree(code_if_top, built_if_top)
def assemble_run(sb):
print('Assembling run/...')
err = 0
try:
try:
sbr = sb.get_root()
top = sb.get_top_component()
assemble_script = sb.get_iftop_folder_path() + 'assemble_run.py'
built_path = sb.get_component_path(top, component.BUILT_ASPECT_NAME)
if os.path.exists(assemble_script):
runnable_assembly.assemble_custom(top, sb)
else:
print(' {0} does not exist. Copying {1} instead.'.format(
assemble_script,#[len(sbr):],
built_path[len(sbr):]))
runnable_assembly.assemble_default(top, sb)
except:
# Make sure our finally block reports outcome.
err = 1
raise
finally:
if err:
print('... FAILED')
else:
print('... OK')
return err
def do_build(sb, args_options, args, builder, builder_options):
err = 0
try:
build_date = time.time()
# Start up a thread that will force us to exit if we hang.
if builder_options.timeout is not None:
sb.set_build_timeout_seconds(int(builder_options.timeout), persist=False)
global _timeout_monitor
_timeout_monitor = timeout_monitor.start(sb.get_build_timeout_seconds())
try:
err = 0
configuring = 'config' in args
building = bool([x for x in args if x.startswith('build')])
if not configuring:
# Always call the script builder, even for sandboxes that are driven
# by ant, cmake, etc. This will allow us to build the buildscripts
# component and any other components that just contain script, like
# python components, php components, pure html+javascript, etc.
if builder.get_name() not in ['script', 'simulated'] and not args_options.assemble_only:
err = _builders['script'].build(sb, builder_options, args)
if not err:
if not args_options.assemble_only:
copy_required_built_files(sb)
if 'clean' in args:
err = builder.clean(sb.get_built_root(), builder.get_clean_exclusions(sb))
args.remove('clean')
if not err and len(args) > 0:
if building:
sb.set_last_build_date(build_date)
err = builder.build(sb, builder_options, args)
if not err and building:
# Always generate the runnable aspect for the sandbox. We do
# this outside of the main build tool because logic to create
# runnable aspects doesn't need to vary from code type to code
# type; it's always a bunch of file copies.
err = assemble_run(sb)
finally:
_timeout_monitor.stop()
if not err:
sb.set_last_successful_build_date(build_date)
except:
err = 1
traceback.print_exc()
return err
def auto_build(sb, args_options, args, builder, builder_options):
err = 0
print('Auto-building whenever code changes. Press CTRL+C to break.')
try:
while True:
if sb.needs_build():
print('\n\nRebuild started at {0:%I:%M %p}...'.format(datetime.datetime.now()))
do_build(sb, args_options, args, builder, builder_options)
time.sleep(1.0)
except KeyboardInterrupt:
pass
except:
traceback.print_exc()
err = 1
return err
def main(argv):
err = 0
args, args_options, builder, builder_options = parse_args(argv)
sb = sandbox.create_from_within(args_options.sandbox)
# Is this a buildable sandbox?
if component.CODE_ASPECT_NAME not in sb.get_component_aspects(sb.get_top_component()):
builder = _SimulatedBuilder(True)
else:
if not builder_options.build_type:
builder_options.build_type = sb.get_build_config()
else:
sb.set_build_config(builder_options.build_type)
builder_options.quick = args_options.quick
args_options.assemble_only = args_options.assemble_only and os.path.isdir(sb.get_built_root())
if args_options.dry_run:
builder = _SimulatedBuilder()
else:
# If cmdline specified a builder name, look up the corresponding object.
# Otherwise, select default one.
if builder:
builder = _builders[builder]
else:
builder = select_builder(sb)
if not builder:
if not os.path.isdir(sb.get_code_root()):
# User has requested a built sandbox -- now requesting that it be runnable!
args_options.assemble_only = True
else:
print('No build tool supports {0}.'.format(sb.get_code_root()))
return 2
print('Using {0} as build tool.'.format(builder.get_name()))
if not args:
args = ['build']
if args_options.auto:
if sb.get_component_reused_aspect(sb.get_top_component()) != component.CODE_ASPECT_NAME:
print("Can't auto-build if top component is pre-built.")
err = 1
else:
auto_build(sb, args_options, args, builder, builder_options)
else:
err = do_build(sb, args_options, args, builder, builder_options)
return err
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
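# Illustrative invocations (assumptions about available builder keys and
# targets; the exact set depends on the builders package and the sandbox):
#
#   python build.py                          # auto-detect the tool, run 'build'
#   python build.py --dry-run build          # simulate the build, report success
#   python build.py -b cmake -t Debug clean build
#   python build.py --auto                   # rebuild whenever code changes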
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Annotates the resource types with extra information."""
import collections
import httplib
from apitools.base.protorpclite import messages
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import path_simplifier
from googlecloudsdk.api_lib.compute import property_selector
def _FirewallRulesToCell(firewall):
"""Returns a compact string describing the firewall rules."""
rules = []
for allowed in firewall.get('allowed', []):
protocol = allowed.get('IPProtocol')
if not protocol:
continue
port_ranges = allowed.get('ports')
if port_ranges:
for port_range in port_ranges:
rules.append('{0}:{1}'.format(protocol, port_range))
else:
rules.append(protocol)
return ','.join(rules)
def _TargetPoolHealthChecksToCell(target_pool):
"""Comma-joins the names of health checks of the given target pool."""
return ','.join(path_simplifier.Name(check) for check in
target_pool.get('healthChecks', []))
def _FirewallSourceRangesToCell(firewall):
"""Comma-joins the source ranges of the given firewall rule."""
return ','.join(firewall.get('sourceRanges', []))
def _FirewallSourceTagsToCell(firewall):
"""Comma-joins the source tags of the given firewall rule."""
return ','.join(firewall.get('sourceTags', []))
def _FirewallTargetTagsToCell(firewall):
"""Comma-joins the target tags of the given firewall rule."""
return ','.join(firewall.get('targetTags', []))
def _ForwardingRuleTarget(forwarding_rule):
"""Gets the API-level target or backend-service of the given rule."""
backend_service = forwarding_rule.get('backendService', None)
if backend_service is not None:
return backend_service
else:
return forwarding_rule.get('target', None)
def _StatusToCell(zone_or_region):
"""Returns status of a machine with deprecation information if applicable."""
deprecated = zone_or_region.get('deprecated', '')
if deprecated:
return '{0} ({1})'.format(zone_or_region.get('status'),
deprecated.get('state'))
else:
return zone_or_region.get('status')
def _DeprecatedDateTimeToCell(zone_or_region):
"""Returns the turndown timestamp of a deprecated machine or ''."""
deprecated = zone_or_region.get('deprecated', '')
if deprecated:
return deprecated.get('deleted')
else:
return ''
def _QuotaToCell(metric, is_integer=True):
"""Returns a function that can format the given quota as usage/limit."""
def QuotaToCell(region):
"""Formats the metric from the parent function."""
for quota in region.get('quotas', []):
if quota.get('metric') != metric:
continue
if is_integer:
return '{0:6}/{1}'.format(
int(quota.get('usage')),
int(quota.get('limit')))
else:
return '{0:7.2f}/{1:.2f}'.format(
quota.get('usage'),
quota.get('limit'))
return ''
return QuotaToCell
def _LocationName(instance_group):
"""Returns a location name, could be region name or zone name."""
if 'zone' in instance_group:
return path_simplifier.Name(instance_group['zone'])
elif 'region' in instance_group:
return path_simplifier.Name(instance_group['region'])
else:
return None
def _LocationScopeType(instance_group):
"""Returns a location scope type, could be region or zone."""
if 'zone' in instance_group:
return 'zone'
elif 'region' in instance_group:
return 'region'
else:
return None
def _MachineTypeMemoryToCell(machine_type):
"""Returns the memory of the given machine type in GB."""
memory = machine_type.get('memoryMb')
if memory:
return '{0:5.2f}'.format(memory / 2.0 ** 10)
else:
return ''
def _FormatCustomMachineTypeName(mt):
"""Checks for custom machine type and modifies output.
Args:
mt: machine type to be formatted
Returns:
If mt was a custom type, then it will be formatted into the desired custom
machine type output. Otherwise, it is returned unchanged.
Helper function for _MachineTypeNameToCell
"""
custom_cpu, custom_ram = instance_utils.GetCpuRamFromCustomName(mt)
if custom_cpu and custom_ram:
# Restricting output to 2 decimal places
custom_ram_gb = '{0:.2f}'.format(float(custom_ram) / (2 ** 10))
mt = 'custom ({0} vCPU, {1} GiB)'.format(custom_cpu, custom_ram_gb)
return mt
def _MachineTypeNameToCell(machine_type):
"""Returns the formatted name of the given machine type.
Most machine types are returned untouched; custom machine types of the form
'custom-N-M' are reformatted as 'custom (N vCPU, M GiB)'.
For example, given the following custom machine_type:
custom-2-3500
This function will return:
custom (2 vCPU, 3.42 GiB)
in the MACHINE_TYPE field when listing out the current instances.
Args:
machine_type: The machine type of the given instance
Returns:
A formatted version of the given custom machine type (as shown in example
in docstring above).
"""
mt = machine_type.get('properties', machine_type).get('machineType')
if mt:
return _FormatCustomMachineTypeName(mt)
return mt
def FormatDescribeMachineTypeName(resources, com_path):
"""Formats a custom machine type when 'instances describe' is called.
Args:
resources: dict of resources available for the instance in question
com_path: command path of the calling command
Returns:
If input is a custom type, returns the formatted custom machine type.
Otherwise, returns None.
"""
if ('instances' in com_path) and ('describe' in com_path):
if not resources:
return None
if 'machineType' not in resources:
return None
mt_splitlist = resources['machineType'].split('/')
mt = mt_splitlist[-1]
if 'custom' not in mt:
return None
formatted_mt = _FormatCustomMachineTypeName(mt)
mt_splitlist[-1] = formatted_mt
return '/'.join(mt_splitlist)
else:
return None
def _OperationHttpStatusToCell(operation):
"""Returns the HTTP response code of the given operation."""
if operation.get('status') == 'DONE':
return operation.get('httpErrorStatusCode') or httplib.OK
else:
return ''
def _ProjectToCell(resource):
"""Returns the project name of the given resource."""
self_link = resource.get('selfLink')
if self_link:
return path_simplifier.ProjectSuffix(self_link).split('/')[0]
else:
return ''
def _MembersToCell(group):
members = group.get('members')
if members:
return len(members)
# Must be '0' instead of 0 to pass comparison 0 or ''.
return '0'
def _BackendsToCell(backend_service):
"""Comma-joins the names of the backend services."""
return ','.join(backend.get('group')
for backend in backend_service.get('backends', []))
def _RoutesNextHopToCell(route):
"""Returns the next hop value in a compact form."""
if route.get('nextHopInstance'):
return path_simplifier.ScopedSuffix(route.get('nextHopInstance'))
elif route.get('nextHopGateway'):
return path_simplifier.ScopedSuffix(route.get('nextHopGateway'))
elif route.get('nextHopIp'):
return route.get('nextHopIp')
elif route.get('nextHopVpnTunnel'):
return path_simplifier.ScopedSuffix(route.get('nextHopVpnTunnel'))
elif route.get('nextHopPeering'):
return route.get('nextHopPeering')
else:
return ''
def _TargetProxySslCertificatesToCell(target_proxy):
"""Joins the names of ssl certificates of the given HTTPS or SSL proxy."""
return ','.join(path_simplifier.Name(cert) for cert in
target_proxy.get('sslCertificates', []))
def _ProtobufDefinitionToFields(message_class):
"""Flattens the fields in a protocol buffer definition.
For example, given the following definition:
message Point {
required int32 x = 1;
required int32 y = 2;
optional string label = 3;
}
message Polyline {
repeated Point point = 1;
optional string label = 2;
}
a call to this function with the Polyline class would produce:
['label',
'point[].label',
'point[].x',
'point[].y']
Args:
message_class: A class that inherits from protorpc.messages.Message
and defines a protocol buffer.
Yields:
The flattened fields, in non-decreasing order.
"""
for field in sorted(message_class.all_fields(), key=lambda field: field.name):
if isinstance(field, messages.MessageField):
for remainder in _ProtobufDefinitionToFields(field.type):
if field.repeated:
yield field.name + '[].' + remainder
else:
yield field.name + '.' + remainder
else:
if field.repeated:
yield field.name + '[]'
else:
yield field.name
_InternalSpec = collections.namedtuple(
'Spec',
['message_class_name', 'table_cols', 'transformations', 'editables'])
_SPECS_V1 = {
'addresses': _InternalSpec(
message_class_name='Address',
table_cols=[
('NAME', 'name'),
('REGION', 'region'),
('ADDRESS', 'address'),
('STATUS', 'status'),
],
transformations=[
('region', path_simplifier.Name),
('users[]', path_simplifier.ScopedSuffix),
],
editables=None,
),
'autoscalers': _InternalSpec(
message_class_name='Autoscaler',
table_cols=[
('NAME', 'name'),
('TARGET', 'target'),
('POLICY', 'autoscalingPolicy'),
],
transformations=[
('zone', path_simplifier.Name),
('target', path_simplifier.Name),
],
editables=None,
),
'backendServices': _InternalSpec(
message_class_name='BackendService',
table_cols=[
('NAME', 'name'),
('BACKENDS', _BackendsToCell),
('PROTOCOL', 'protocol'),
],
transformations=[
('backends[].group', path_simplifier.ScopedSuffix),
],
editables=[
'backends',
'description',
'enableCDN',
'healthChecks',
'port',
'portName',
'protocol',
'timeoutSec',
],
),
'backendServiceGroupHealth': _InternalSpec(
message_class_name='BackendServiceGroupHealth',
table_cols=[
],
transformations=[
('healthStatus[].instance', path_simplifier.ScopedSuffix),
],
editables=None,
),
'disks': _InternalSpec(
message_class_name='Disk',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('SIZE_GB', 'sizeGb'),
('TYPE', 'type'),
('STATUS', 'status'),
],
transformations=[
('sourceSnapshot', path_simplifier.Name),
('type', path_simplifier.Name),
('zone', path_simplifier.Name),
],
editables=None,
),
'diskTypes': _InternalSpec(
message_class_name='DiskType',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('VALID_DISK_SIZES', 'validDiskSize'),
],
transformations=[
('zone', path_simplifier.Name),
],
editables=None,
),
'firewalls': _InternalSpec(
message_class_name='Firewall',
table_cols=[
('NAME', 'name'),
('NETWORK', 'network'),
('SRC_RANGES', _FirewallSourceRangesToCell),
('RULES', _FirewallRulesToCell),
('SRC_TAGS', _FirewallSourceTagsToCell),
('TARGET_TAGS', _FirewallTargetTagsToCell),
],
transformations=[
('network', path_simplifier.Name),
],
editables=None,
),
'forwardingRules': _InternalSpec(
message_class_name='ForwardingRule',
table_cols=[
('NAME', 'name'),
('REGION', 'region'),
('IP_ADDRESS', 'IPAddress'),
('IP_PROTOCOL', 'IPProtocol'),
('TARGET', _ForwardingRuleTarget),
],
transformations=[
('region', path_simplifier.Name),
('target', path_simplifier.ScopedSuffix),
],
editables=None,
),
'groups': _InternalSpec(
message_class_name='Group',
table_cols=[
('NAME', 'name'),
('NUM_MEMBERS', _MembersToCell),
('DESCRIPTION', 'description'),
],
transformations=[],
editables=[],
),
'healthChecks': _InternalSpec(
message_class_name='HealthCheck',
table_cols=[
('NAME', 'name'),
('PROTOCOL', 'type'),
],
transformations=[],
editables=None,
),
'httpHealthChecks': _InternalSpec(
message_class_name='HttpHealthCheck',
table_cols=[
('NAME', 'name'),
('HOST', 'host'),
('PORT', 'port'),
('REQUEST_PATH', 'requestPath'),
],
transformations=[
],
editables=None,
),
'httpsHealthChecks': _InternalSpec(
message_class_name='HttpsHealthCheck',
table_cols=[
('NAME', 'name'),
('HOST', 'host'),
('PORT', 'port'),
('REQUEST_PATH', 'requestPath'),
],
transformations=[
],
editables=None,
),
'iap': _InternalSpec(
message_class_name='BackendServiceIAP',
table_cols=[
('NAME', 'name'),
('ENABLED', 'enabled'),
('OAUTH2_CLIENT_ID', 'oauth2ClientId'),
('OAUTH2_CLIENT_SECRET', 'oauth2ClientSecret'),
('OAUTH2_CLIENT_SECRET_SHA256', 'oauth2ClientSecretSha256'),
],
transformations=[],
editables=None,
),
'images': _InternalSpec(
message_class_name='Image',
table_cols=[
('NAME', 'name'),
('PROJECT', _ProjectToCell),
('FAMILY', 'family'),
('DEPRECATED', 'deprecated.state'),
('STATUS', 'status'),
],
transformations=[],
editables=None,
),
'instanceGroups': _InternalSpec(
message_class_name='InstanceGroup',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('NETWORK', 'network'),
('MANAGED', 'isManaged'),
('INSTANCES', 'size'),
],
transformations=[
('zone', path_simplifier.Name),
('size', str),
],
editables=None,
),
'instanceGroupManagers': _InternalSpec(
message_class_name='InstanceGroupManager',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('BASE_INSTANCE_NAME', 'baseInstanceName'),
('SIZE', 'size'),
('TARGET_SIZE', 'targetSize'),
('INSTANCE_TEMPLATE', 'instanceTemplate'),
('AUTOSCALED', 'autoscaled'),
],
transformations=[
('zone', path_simplifier.Name),
('instanceGroup', path_simplifier.Name),
('instanceTemplate', path_simplifier.Name),
],
editables=None,
),
'instances': _InternalSpec(
message_class_name='Instance',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('MACHINE_TYPE', _MachineTypeNameToCell),
('PREEMPTIBLE', 'scheduling.preemptible'),
('INTERNAL_IP', 'networkInterfaces[].networkIP.notnull().list()'),
('EXTERNAL_IP',
'networkInterfaces[].accessConfigs[0].natIP.notnull().list()'),
('STATUS', 'status'),
],
transformations=[
('disks[].source', path_simplifier.Name),
('machineType', path_simplifier.Name),
('networkInterfaces[].network', path_simplifier.Name),
('zone', path_simplifier.Name),
],
editables=None,
),
'instanceTemplates': _InternalSpec(
message_class_name='InstanceTemplate',
table_cols=[
('NAME', 'name'),
('MACHINE_TYPE', _MachineTypeNameToCell),
('PREEMPTIBLE', 'properties.scheduling.preemptible'),
('CREATION_TIMESTAMP', 'creationTimestamp'),
],
transformations=[],
editables=None,
),
'machineTypes': _InternalSpec(
message_class_name='MachineType',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('CPUS', 'guestCpus'),
('MEMORY_GB', _MachineTypeMemoryToCell),
('DEPRECATED', 'deprecated.state'),
],
transformations=[
('zone', path_simplifier.Name),
],
editables=None,
),
'networks': _InternalSpec(
message_class_name='Network',
table_cols=[
('NAME', 'name'),
('MODE', 'x_gcloud_mode'),
('IPV4_RANGE', 'IPv4Range'),
('GATEWAY_IPV4', 'gatewayIPv4'),
],
transformations=[],
editables=None,
),
'projects': _InternalSpec(
message_class_name='Project',
table_cols=[], # We do not support listing projects since
# there is only one project (and there is no
# API support).
transformations=[
],
editables=None,
),
'operations': _InternalSpec(
message_class_name='Operation',
table_cols=[
('NAME', 'name'),
('TYPE', 'operationType'),
('TARGET', 'targetLink'),
('HTTP_STATUS', _OperationHttpStatusToCell),
('STATUS', 'status'),
('TIMESTAMP', 'insertTime'),
],
transformations=[
('targetLink', path_simplifier.ScopedSuffix),
],
editables=None,
),
'invalidations': _InternalSpec(
message_class_name='Operation',
table_cols=[
('DESCRIPTION', 'description'),
('HTTP_STATUS', _OperationHttpStatusToCell),
('STATUS', 'status'),
('TIMESTAMP', 'insertTime'),
],
transformations=[
('targetLink', path_simplifier.ScopedSuffix),
],
editables=None,
),
'regionBackendServices': _InternalSpec(
message_class_name='BackendService',
table_cols=[
('NAME', 'name'),
('BACKENDS', _BackendsToCell),
('PROTOCOL', 'protocol'),
],
transformations=[
('backends[].group', path_simplifier.ScopedSuffix),
],
editables=[
'backends',
'description',
'enableCDN',
'healthChecks',
'port',
'portName',
'protocol',
'timeoutSec',
],
),
'regions': _InternalSpec(
message_class_name='Region',
table_cols=[
('NAME', 'name'),
('CPUS', _QuotaToCell('CPUS', is_integer=False)),
('DISKS_GB', _QuotaToCell('DISKS_TOTAL_GB', is_integer=True)),
('ADDRESSES', _QuotaToCell('IN_USE_ADDRESSES', is_integer=True)),
('RESERVED_ADDRESSES',
_QuotaToCell('STATIC_ADDRESSES', is_integer=True)),
('STATUS', _StatusToCell),
('TURNDOWN_DATE', _DeprecatedDateTimeToCell),
],
transformations=[
('zones[]', path_simplifier.Name),
],
editables=None,
),
'routes': _InternalSpec(
message_class_name='Route',
table_cols=[
('NAME', 'name'),
('NETWORK', 'network'),
('DEST_RANGE', 'destRange'),
('NEXT_HOP', _RoutesNextHopToCell),
('PRIORITY', 'priority'),
],
transformations=[
('network', path_simplifier.Name),
],
editables=None,
),
'snapshots': _InternalSpec(
message_class_name='Snapshot',
table_cols=[
('NAME', 'name'),
('DISK_SIZE_GB', 'diskSizeGb'),
('SRC_DISK', 'sourceDisk'),
('STATUS', 'status'),
],
transformations=[
('sourceDisk', path_simplifier.ScopedSuffix),
],
editables=None,
),
'sslCertificates': _InternalSpec(
message_class_name='Network',
table_cols=[
('NAME', 'name'),
('CREATION_TIMESTAMP', 'creationTimestamp'),
],
transformations=[],
editables=None,
),
'subnetworks': _InternalSpec(
message_class_name='Subnetwork',
table_cols=[
('NAME', 'name'),
('REGION', 'region'),
('NETWORK', 'network'),
('RANGE', 'ipCidrRange')
],
transformations=[
('network', path_simplifier.Name),
('region', path_simplifier.Name),
],
editables=None,
),
'targetHttpProxies': _InternalSpec(
message_class_name='TargetHttpProxy',
table_cols=[
('NAME', 'name'),
('URL_MAP', 'urlMap'),
],
transformations=[
('urlMap', path_simplifier.Name),
],
editables=None,
),
'targetHttpsProxies': _InternalSpec(
message_class_name='TargetHttpsProxy',
table_cols=[
('NAME', 'name'),
('SSL_CERTIFICATES', _TargetProxySslCertificatesToCell),
('URL_MAP', 'urlMap'),
],
transformations=[
('sslCertificates[]', path_simplifier.Name),
('urlMap', path_simplifier.Name),
],
editables=None,
),
'targetSslProxies': _InternalSpec(
message_class_name='TargetSslProxy',
table_cols=[
('NAME', 'name'),
('PROXY_HEADER', 'proxyHeader'),
('SERVICE', 'service'),
('SSL_CERTIFICATES', _TargetProxySslCertificatesToCell)
],
transformations=[
('sslCertificates[]', path_simplifier.Name),
('service', path_simplifier.Name),
],
editables=None,
),
'targetInstances': _InternalSpec(
message_class_name='TargetInstance',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('INSTANCE', 'instance'),
('NAT_POLICY', 'natPolicy'),
],
transformations=[
('instance', path_simplifier.Name),
('zone', path_simplifier.Name),
],
editables=None,
),
'targetPoolInstanceHealth': _InternalSpec(
message_class_name='TargetPoolInstanceHealth',
table_cols=[
],
transformations=[
('healthStatus[].instance', path_simplifier.ScopedSuffix),
],
editables=None,
),
'targetPools': _InternalSpec(
message_class_name='TargetPool',
table_cols=[
('NAME', 'name'),
('REGION', 'region'),
('SESSION_AFFINITY', 'sessionAffinity'),
('BACKUP', 'backupPool'),
('HEALTH_CHECKS', _TargetPoolHealthChecksToCell),
],
transformations=[
('backupPool', path_simplifier.Name),
('healthChecks[]', path_simplifier.Name),
('instances[]', path_simplifier.ScopedSuffix),
('region', path_simplifier.Name),
],
editables=None,
),
'targetVpnGateways': _InternalSpec(
message_class_name='TargetVpnGateway',
table_cols=[
('NAME', 'name'),
('NETWORK', 'network'),
('REGION', 'region')
],
transformations=[
('network', path_simplifier.Name),
('region', path_simplifier.Name)],
editables=None
),
'urlMaps': _InternalSpec(
message_class_name='UrlMap',
table_cols=[
('NAME', 'name'),
('DEFAULT_SERVICE', 'defaultService'),
],
transformations=[
('defaultService', path_simplifier.Name),
('pathMatchers[].defaultService', path_simplifier.Name),
('pathMatchers[].pathRules[].service', path_simplifier.Name),
('tests[].service', path_simplifier.Name),
],
editables=[
'defaultService',
'description',
'hostRules',
'pathMatchers',
'tests',
],
),
'users': _InternalSpec(
message_class_name='User',
table_cols=[
('NAME', 'name'),
('OWNER', 'owner'),
('DESCRIPTION', 'description'),
],
transformations=[],
editables=[],
),
'zones': _InternalSpec(
message_class_name='Zone',
table_cols=[
('NAME', 'name'),
('REGION', 'region'),
('STATUS', _StatusToCell),
('TURNDOWN_DATE', _DeprecatedDateTimeToCell),
],
transformations=[
('region', path_simplifier.Name),
],
editables=None,
),
'vpnTunnels': _InternalSpec(
message_class_name='VpnTunnel',
table_cols=[
('NAME', 'name'),
('REGION', 'region'),
('GATEWAY', 'targetVpnGateway'),
('PEER_ADDRESS', 'peerIp')
],
transformations=[
('region', path_simplifier.Name),
('targetVpnGateway', path_simplifier.Name)],
editables=None
),
'routers': _InternalSpec(
message_class_name='Router',
table_cols=[
('NAME', 'name'),
('REGION', 'region'),
('NETWORK', 'network'),
],
transformations=[
('network', path_simplifier.Name),
('region', path_simplifier.Name),
],
editables=None,
),
}
_SPECS_BETA = _SPECS_V1.copy()
_SPECS_BETA['backendBuckets'] = _InternalSpec(
message_class_name='BackendBucket',
table_cols=[
('NAME', 'name'),
('GCS_BUCKET_NAME', 'bucketName'),
('ENABLE_CDN', 'enableCdn')
],
transformations=[
('enableCdn', lambda x: str(x).lower()),
],
editables=[
'bucketName',
'description',
'enableCdn',
])
_SPECS_BETA['urlMaps'] = _InternalSpec(
message_class_name='UrlMap',
table_cols=[
('NAME', 'name'),
('DEFAULT_SERVICE', 'defaultService'),
],
transformations=[
('defaultService', path_simplifier.TypeSuffix),
('pathMatchers[].defaultService', path_simplifier.TypeSuffix),
('pathMatchers[].pathRules[].service', path_simplifier.TypeSuffix),
('tests[].service', path_simplifier.TypeSuffix),
],
editables=[
'defaultService',
'description',
'hostRules',
'pathMatchers',
'tests',
])
_SPECS_ALPHA = _SPECS_BETA.copy()
_SPECS_ALPHA['hosts'] = _InternalSpec(
message_class_name='Host',
table_cols=[
('NAME', 'name'),
('REQUEST_PATH', 'requestPath'),
],
transformations=[],
editables=None)
_SPECS_ALPHA['hostTypes'] = _InternalSpec(
message_class_name='HostType',
table_cols=[
('NAME', 'name'),
('ZONE', 'zone'),
('DEPRECATED', 'deprecated'),
('CPUs', 'guestCpus'),
('MEMORY(MB)', 'memoryMb'),
('LOCAL SSD(GB)', 'localSsdGb'),
],
transformations=[],
editables=None)
_SPECS_ALPHA['instanceGroups'] = _InternalSpec(
message_class_name='InstanceGroup',
table_cols=[
('NAME', 'name'),
('LOCATION', _LocationName),
('SCOPE', _LocationScopeType),
('NETWORK', 'network'),
('MANAGED', 'isManaged'),
('INSTANCES', 'size'),
],
transformations=[
('size', str),
],
editables=None,
)
_SPECS_ALPHA['instanceGroupManagers'] = _InternalSpec(
message_class_name='InstanceGroupManager',
table_cols=[
('NAME', 'name'),
('LOCATION', _LocationName),
('SCOPE', _LocationScopeType),
('BASE_INSTANCE_NAME', 'baseInstanceName'),
('SIZE', 'size'),
('TARGET_SIZE', 'targetSize'),
('INSTANCE_TEMPLATE', 'instanceTemplate'),
('AUTOSCALED', 'autoscaled'),
],
transformations=[
('instanceGroup', path_simplifier.Name),
('instanceTemplate', path_simplifier.Name),
],
editables=None,
)
_SPECS_ALPHA['backendServices'] = _InternalSpec(
message_class_name='BackendService',
table_cols=[
('NAME', 'name'),
('BACKENDS', _BackendsToCell),
('PROTOCOL', 'protocol'),
],
transformations=[
('backends[].group', path_simplifier.ScopedSuffix),
],
editables=[
'backends',
'description',
'enableCDN',
'sessionAffinity',
'affinityCookieTTL',
'healthChecks',
'iap.enabled',
'iap.oauth2ClientId',
'iap.oauth2ClientSecret',
'port',
'portName',
'protocol',
'timeoutSec',
],)
_SPECS_ALPHA['urlMaps'] = _InternalSpec(
message_class_name='UrlMap',
table_cols=[
('NAME', 'name'),
('DEFAULT_SERVICE', 'defaultService'),
],
transformations=[
('defaultService', path_simplifier.TypeSuffix),
('pathMatchers[].defaultService', path_simplifier.TypeSuffix),
('pathMatchers[].pathRules[].service', path_simplifier.TypeSuffix),
('tests[].service', path_simplifier.TypeSuffix),
],
editables=[
'defaultService',
'description',
'hostRules',
'pathMatchers',
'tests',
])
_SPECS_ALPHA['peerings'] = _InternalSpec(
message_class_name='NetworkPeering',
table_cols=[
('NAME', 'name'),
('network', 'network'),
('autoCreateRoutes', 'autoCreateRoutes'),
('state', 'state'),
],
transformations=None,
editables=None,
)
def _GetSpecsForVersion(api_version):
"""Get Specs for the given API version.
Returns _SPECS_V1 for 'v1' (and 'v2beta1'), _SPECS_ALPHA for alpha API
versions, and _SPECS_BETA otherwise.
Args:
api_version: A string identifying the API version, e.g. 'v1'.
Returns:
A map associating each message class name with an _InternalSpec object.
"""
if api_version == 'v1' or api_version == 'v2beta1':
return _SPECS_V1
if 'alpha' in api_version:
return _SPECS_ALPHA
return _SPECS_BETA
Spec = collections.namedtuple(
'Spec',
['message_class', 'fields', 'table_cols', 'transformations', 'editables'])
def GetSpec(resource_type, message_classes, api_version):
"""Returns a Spec for the given resource type."""
spec = _GetSpecsForVersion(api_version)
if resource_type not in spec:
raise KeyError('"%s" not found in Specs for version "%s"' %
(resource_type, api_version))
spec = spec[resource_type]
table_cols = []
for name, action in spec.table_cols:
if isinstance(action, basestring):
table_cols.append((name, property_selector.PropertyGetter(action)))
elif callable(action):
table_cols.append((name, action))
else:
raise ValueError('expected function or property in table_cols list: {0}'
.format(spec))
message_class = getattr(message_classes, spec.message_class_name)
fields = list(_ProtobufDefinitionToFields(message_class))
return Spec(message_class=message_class,
fields=fields,
table_cols=table_cols,
transformations=spec.transformations,
editables=spec.editables)
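# Illustrative sketch (an assumption, not part of this module): what a caller
# gets back from GetSpec(). `messages` stands in for the compute API messages
# module normally passed through, and 'instances' names a key in the spec map
# above.
#
#   spec = GetSpec('instances', messages, 'v1')
#   header = [name for name, _ in spec.table_cols]   # column titles, e.g. 'NAME'
#   print(spec.fields)   # flattened field paths from _ProtobufDefinitionToFields
#
# Each table_cols action is either a property_selector.PropertyGetter built
# from a dotted path string (e.g. 'status') or one of the *_ToCell helpers
# defined above, so callers can compute one cell per resource accordingly.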
|
|
#
# Copyright 2014 Rackspace. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from oslo_log import log as logging
import six
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
common import data_models as data_models
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
common import exceptions
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
i18n import _LE
X509_BEG = "-----BEGIN CERTIFICATE-----"
X509_END = "-----END CERTIFICATE-----"
LOG = logging.getLogger(__name__)
def validate_cert(certificate, private_key=None,
private_key_passphrase=None, intermediates=None):
"""Validate that the certificate is a valid PEM encoded X509 object
Optionally verify that the private key matches the certificate.
Optionally verify that the intermediates are valid X509 objects.
:param certificate: A PEM encoded certificate
:param private_key: The private key for the certificate
:param private_key_passphrase: Passphrase for accessing the private key
:param intermediates: PEM encoded intermediate certificates
:returns: boolean
"""
cert = _get_x509_from_pem_bytes(certificate)
if intermediates:
for x509Pem in _split_x509s(intermediates):
_get_x509_from_pem_bytes(x509Pem)
if private_key:
pkey = _read_privatekey(private_key, passphrase=private_key_passphrase)
pknum = pkey.public_key().public_numbers()
certnum = cert.public_key().public_numbers()
if pknum != certnum:
raise exceptions.MisMatchedKey
return True
def _read_privatekey(privatekey_pem, passphrase=None):
if passphrase:
if six.PY2:
passphrase = passphrase.encode("utf-8")
elif six.PY3:
passphrase = six.b(passphrase)
try:
pkey = privatekey_pem.encode('ascii')
return serialization.load_pem_private_key(pkey, passphrase,
backends.default_backend())
except Exception:
raise exceptions.NeedsPassphrase
def _split_x509s(xstr):
"""Split the input string into individual x509 text blocks
:param xstr: A large multi x509 certificate block
:returns: A generator yielding strings, each representing an
X509 PEM block surrounded by BEGIN CERTIFICATE,
END CERTIFICATE block tags
"""
curr_pem_block = []
inside_x509 = False
for line in xstr.replace("\r", "").split("\n"):
if inside_x509:
curr_pem_block.append(line)
if line == X509_END:
yield "\n".join(curr_pem_block)
curr_pem_block = []
inside_x509 = False
continue
else:
if line == X509_BEG:
curr_pem_block.append(line)
inside_x509 = True
def get_host_names(certificate):
"""Extract the host names from the Pem encoded X509 certificate
:param certificate: A PEM encoded certificate
:returns: A dictionary containing the following keys:
['cn', 'dns_names']
where 'cn' is the CN from the SubjectName of the certificate, and
'dns_names' is a list of dNSNames (possibly empty) from
the SubjectAltNames of the certificate.
"""
try:
certificate = certificate.encode('ascii')
cert = x509.load_pem_x509_certificate(certificate,
backends.default_backend())
cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0]
host_names = {
'cn': cn.value.lower(),
'dns_names': []
}
try:
ext = cert.extensions.get_extension_for_oid(
x509.OID_SUBJECT_ALTERNATIVE_NAME
)
host_names['dns_names'] = ext.value.get_values_for_type(
x509.DNSName)
except x509.ExtensionNotFound:
LOG.debug("%s extension not found",
x509.OID_SUBJECT_ALTERNATIVE_NAME)
return host_names
except Exception:
LOG.exception(_LE("Unreadable certificate."))
raise exceptions.UnreadableCert
def get_cert_expiration(certificate_pem):
"""Extract the expiration date from the Pem encoded X509 certificate
:param certificate_pem: Certificate in PEM format
:returns: Expiration date of certificate_pem
"""
try:
certificate = certificate_pem.encode('ascii')
cert = x509.load_pem_x509_certificate(certificate,
backends.default_backend())
return cert.not_valid_after
except Exception as e:
LOG.exception(e)
raise exceptions.UnreadableCert
def _get_x509_from_pem_bytes(certificate_pem):
"""Parse X509 data from a PEM encoded certificate
:param certificate_pem: Certificate in PEM format
:returns: crypto high-level x509 data from the PEM string
"""
try:
certificate = certificate_pem.encode('ascii')
x509cert = x509.load_pem_x509_certificate(certificate,
backends.default_backend())
except Exception:
raise exceptions.UnreadableCert
return x509cert
def build_pem(tls_container):
"""Concatenate TLS container fields to create a PEM
encoded certificate file
:param tls_container: Object containing TLS certificates
:returns: Pem encoded certificate file
"""
pem = []
if tls_container.intermediates:
pem = tls_container.intermediates[:]
pem.extend([tls_container.certificate, tls_container.private_key])
return '\n'.join(pem)
def load_certificates_data(cert_mngr, listener):
"""Load TLS certificate data from the listener.
return TLS_CERT and SNI_CERTS
"""
tls_cert = None
sni_certs = []
if listener.tls_certificate_id:
tls_cert = _map_cert_tls_container(
cert_mngr.get_cert(listener.project_id,
listener.tls_certificate_id,
check_only=True))
if listener.sni_containers:
for sni_cont in listener.sni_containers:
cert_container = _map_cert_tls_container(
cert_mngr.get_cert(listener.project_id,
sni_cont.tls_container.id,
check_only=True))
sni_certs.append(cert_container)
return {'tls_cert': tls_cert, 'sni_certs': sni_certs}
def _map_cert_tls_container(cert):
return data_models.TLSContainer(
primary_cn=get_primary_cn(cert),
private_key=cert.get_private_key(),
certificate=cert.get_certificate(),
intermediates=cert.get_intermediates())
def get_primary_cn(tls_cert):
"""Returns primary CN for Certificate."""
return get_host_names(tls_cert.get_certificate())['cn']
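# Illustrative sketch (not part of this module): typical call pattern for the
# helpers above. 'server.crt' and 'server.key' are hypothetical PEM files;
# validate_cert() raises on a key/cert mismatch rather than returning False.
#
#   with open('server.crt') as f:
#       pem_cert = f.read()
#   with open('server.key') as f:
#       pem_key = f.read()
#
#   validate_cert(pem_cert, private_key=pem_key)   # True, or raises
#   names = get_host_names(pem_cert)               # {'cn': ..., 'dns_names': [...]}
#   expires = get_cert_expiration(pem_cert)        # datetime from notAfter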
|
|
#!/usr/bin/env python3
'''
lib/util/fs.py
File-system utilities.
'''
import itertools
import json
import logging
import os
logger = logging.getLogger('sublime-ycmd.' + __name__)
def is_directory(path):
'''
Returns true if the supplied `path` refers to a valid directory, and
false otherwise.
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
return os.path.exists(path) and os.path.isdir(path)
def is_file(path):
'''
Returns true if the supplied `path` refers to a valid plain file, and
false otherwise.
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
return os.path.exists(path) and os.path.isfile(path)
def get_directory_name(path):
'''
Returns the directory name for the file at `path`. If `path` refers to
a directory, the parent directory is returned.
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
head, tail = os.path.split(path)
if not tail and head != path:
# stripped a trailing directory separator, so redo it
head, tail = os.path.split(head)
return head
def get_base_name(path):
'''
Returns the base name for the file at `path`. If `path` refers to a
directory, the directory name is returned. If `path` refers to a mount
point, an empty string is returned.
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
head, tail = os.path.split(path)
if tail:
return tail
if head != path:
# stripped a trailing directory separator, so redo it
head, tail = os.path.split(head)
return tail
def load_json_file(path, encoding='utf-8'):
'''
Returns a `dict` generated by reading the file at `path`, and then parsing
it as JSON. This will throw if the file does not exist, cannot be read, or
cannot be parsed as JSON.
The `encoding` parameter is used when initially reading the file.
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
if not isinstance(encoding, str):
raise TypeError('encoding must be a str: %r' % (encoding))
path = resolve_env(path)
if not is_file(path):
logger.warning('path does not seem to refer to a valid file: %s', path)
# but fall through and try anyway
logger.debug(
'attempting to parse file with path, encoding: %s, %s', path, encoding,
)
with open(path, encoding=encoding) as file_handle:
file_data = json.load(file_handle)
logger.debug('successfully parsed json file')
return file_data
def save_json_file(fp, data, encoding='utf-8'):
'''
Serializes and writes out `data` to `fp`. The data should be provided as
a `dict`, and will be serialized to a JSON string. The `fp` parameter
should support a `write` method.
The `encoding` parameter is used when encoding the serialized data.
'''
json_str = json.dumps(data)
json_bytes = json_str.encode(encoding=encoding)
fp.write(json_bytes)
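# Note: the data is encoded to bytes before writing, so `fp` should be opened
# in binary mode. A minimal illustrative round-trip (file name is arbitrary):
#
#   with open('settings.json', 'wb') as fp:
#       save_json_file(fp, {'enabled': True})
#   settings = load_json_file('settings.json')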
def resolve_env(path):
'''
Resolves environment components in `path` and returns the result.
This will expand user home references like `'~'` and environment variable
references like `'$PATH'`.
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
# attempt to expand things like '~/' before proceeding
expanded_path = path[:]
expanded_path = os.path.expanduser(expanded_path)
expanded_path = os.path.expandvars(expanded_path)
return expanded_path
def resolve_abspath(path, start=None):
'''
Resolves `path` to an absolute path. If the path is already an absolute
path, it is returned as-is. Otherwise, it is joined to the `start` path,
which is assumed to be an absolute path.
If `start` is not provided, the current working directory is used.
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
# attempt to expand things like '~/' before proceeding
path = resolve_env(path)
if os.path.isabs(path):
logger.debug('path is already absolute: %s', path)
return path
logger.debug('path is not absolute, need to resolve it')
if start is None:
start = os.getcwd()
logger.debug('using working directory for relative paths: %s', start)
assert isinstance(start, str), 'start must be a str: %r' % (start)
logger.debug('joining path, start: %s, %s', path, start)
return os.path.join(start, path)
def resolve_binary_path(binpath, workingdir=None, *pathdirs):
'''
Resolves the binary path `binpath` to an absolute path based on the
supplied parameters. The following rules are applied until a path is found:
1. If it is already an absolute path, it is returned as-is.
2. If a file exists at the location relative to the working directory,
then the absolute path to that file is returned. When workingdir is
`None`, the current working directory is used (probably '.').
3. For each path directory in pathdirs, apply same relative location step
as above. Only the first match will be returned, even if there would be
more matches. When no pathdirs are provided, directories in the PATH
environment variable are used.
If no path is found, this will return `None`.
'''
if not isinstance(binpath, str):
raise TypeError('binpath must be a str: %r' % binpath)
if os.path.isabs(binpath):
logger.debug(
'binpath already absolute, returning as-is: %r', binpath
)
return binpath
if workingdir is None:
curdir = os.curdir
workingdir = curdir
logger.debug('filling in current working directory: %s', curdir)
if not pathdirs:
rawpath = os.getenv('PATH', default=None)
if rawpath is None:
logger.warning('cannot read PATH environment variable, might not '
'be able to resolve binary paths correctly')
# just in case, assign it a dummy iterable value too
pathdirs = tuple()
else:
assert isinstance(rawpath, str), \
'[internal] rawpath from os.getenv is not a str: %r' % rawpath
pathdirs = rawpath.split(os.path.pathsep)
def generate_binpaths():
'''
Provides an iterator for all possible absolute locations for binpath.
Platform dependent suffixes are automatically added, if applicable.
'''
should_add_win_exts = os.name == 'nt'
win_exts = ['.exe', '.cmd', '.bat']
for pathdir in itertools.chain([workingdir], pathdirs):
assert isinstance(pathdir, str), \
'[internal] pathdir is not a str: %r' % pathdir
yield os.path.join(pathdir, binpath)
if should_add_win_exts:
for win_ext in win_exts:
filebasename = '%s%s' % (binpath, win_ext)
yield os.path.join(pathdir, filebasename)
found_files = filter(os.path.isfile, generate_binpaths())
result_binpath = None
try:
result_binpath = next(found_files)
except StopIteration:
logger.debug('%s not found in %s, %s', binpath, workingdir, pathdirs)
else:
logger.debug('%s found at %s', binpath, result_binpath)
return result_binpath
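# Illustrative usage sketch for `resolve_binary_path`; the binary name and
# directories below are hypothetical examples, not values this module requires:
#
#     resolve_binary_path('git')                    # search workingdir (cwd), then PATH
#     resolve_binary_path('git', '/opt/tools')      # search /opt/tools, then PATH
#     resolve_binary_path('git', None, '/usr/bin')  # search cwd, then only /usr/bin
#
# Each call returns the first existing candidate path, or `None` when nothing
# is found.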
def default_python_binary_path():
'''
Generates and returns a path to the python executable, as resolved by
`resolve_binary_path`. This will automatically prefer pythonw in Windows.
'''
if os.name == 'nt':
pythonw_binpath = resolve_binary_path('pythonw')
if pythonw_binpath:
return pythonw_binpath
python_binpath = resolve_binary_path('python')
if python_binpath:
return python_binpath
# best effort:
return 'python'
def _split_path_components(path):
'''
Splits `path` into a list of path components. The resulting list will start
with directory names, leading up to the basename of the path.
e.g. '/usr/lib' -> ['', 'usr', 'lib']
'C:\\Users' -> ['C:', 'Users']
'''
if not isinstance(path, str):
raise TypeError('path must be a str: %r' % (path))
primary_dirsep = os.sep
secondary_dirsep = os.altsep
if not secondary_dirsep:
# easy case, only one directory separator, so split on it
return path.split(primary_dirsep)
# ugh, more complicated case
# the file system might permit both directory separators
# e.g. 'C:\\Program Files/Sublime Text 3'
# need to split on both to get the correct result in that case...
def _iter_components(path=path):
current_path = path[:]
while current_path:
primary_position = current_path.find(primary_dirsep)
secondary_position = current_path.find(secondary_dirsep)
split_position = -1
# `str.find` returns -1 if no match, so check that first
if primary_position >= 0 and secondary_position >= 0:
# both are present - figure out which is first
if primary_position > secondary_position:
# secondary separator first - split on second
split_position = secondary_position
else:
# primary separator first - split on first
split_position = primary_position
elif primary_position >= 0:
# primary separator only - split on it
# technically we can just split on the one separator and
# yield the list items, but meh
split_position = primary_position
elif secondary_position >= 0:
split_position = secondary_position
# else, nothing to split
if split_position >= 0:
# perform split from 0 to split_position (non-inclusive), and
# from split_position+1 to the end
head = (
current_path[:split_position]
if split_position > 0 else ''
)
tail = (
current_path[split_position + 1:]
if split_position < len(current_path) else ''
)
yield head
current_path = tail
else:
yield current_path
current_path = ''
return list(_iter_components(path))
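# Expected splitting behaviour (a sketch; the second example assumes a Windows
# platform where `os.altsep` is '/'):
#
#     _split_path_components('/usr/local/lib')        -> ['', 'usr', 'local', 'lib']
#     _split_path_components('C:\\Program Files/app') -> ['C:', 'Program Files', 'app']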
def _commonpath_polyfill(paths):
'''
Polyfill for `os.path.commonpath` (not available in Python 3.3).
This method returns the common ancestor between all the given `paths`.
Implementation note:
This works by splitting the paths using directory separators, and then
comparing each portion from left to right until a mismatch is found.
Unlike `os.path.commonprefix`, which compares strings character by character
and therefore doesn't necessarily produce a valid path, this compares whole
path components, so the result is always a valid ancestor path.
'''
if not paths:
raise ValueError('paths are invalid: %r' % (paths))
# NOTE : the `zip` logic below is slow... only do it if necessary:
if len(paths) == 1:
first_path = paths[0]
logger.debug('only one path, returning it: %s', first_path)
return first_path
path_components = [_split_path_components(path) for path in paths]
logger.debug(
'calculated path components for all paths: %s', path_components
)
# bundle up the components so we traverse them one at a time
# e.g. [['usr', 'lib'], ['usr', 'bin']]
# -> (('usr', 'usr'), ('lib', 'bin'))
path_traverser = zip(*path_components)
# now traverse them until there's a mismatch
common_path_components = []
for current_components in path_traverser:
# `current_components` is a tuple of strings
# e.g. ('usr', 'usr', 'lib') from ['/usr', '/usr/bin', '/lib']
if not current_components:
break
one_component = current_components[0]
all_equal = all(
c == one_component for c in current_components
)
if not all_equal:
break
# else, equal! record this component and move on
common_path_components.append(one_component)
logger.debug(
'calculated common path components: %s', common_path_components
)
if not common_path_components:
raise ValueError('paths do not have a common ancestor')
# ugh, `ntpath` won't join absolute paths correctly, so ensure that the
# first item always ends in a directory separator
common_path_components[0] += os.sep
return os.path.join(*common_path_components)
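# Behaviour sketch for the polyfill (POSIX examples; paths are illustrative):
#
#     _commonpath_polyfill(['/usr/local/lib', '/usr/local/share'])  -> '/usr/local'
#     _commonpath_polyfill(['/usr/lib', '/opt/lib'])                -> '/'
#     _commonpath_polyfill(['relative/a', 'other/b'])               -> raises ValueError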
def get_common_ancestor(paths, default=None):
'''
Returns the common ancestor path shared by all of the given `paths`.
If `paths` is empty or the paths do not share a common ancestor, the
`default` value is returned instead.
'''
common_path = default
try:
# common_path = os.path.commonpath(paths)
common_path = _commonpath_polyfill(paths)
except ValueError as e:
logger.debug(
'invalid paths, cannot get common ancestor: %s, %r',
paths, e,
)
return common_path
|
|
#!/usr/bin/python
# Copyright 2015 SICS Swedish ICT AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the general functions of a server.
We can start a server by running sudo ./server.py
This server receives and serves requests from guest_clients.
By default, the server's address is set to "192.168.122.1" and the port
number to 9999. Users can modify these settings according to their platform.
Functions:
sender(intQueue, request): sends the performance monitoring information back to a client
MyTCPHandler(SocketServer.StreamRequestHandler): receives and handles guest requests over TCP connections
"""
# import subprocess
# import sys
from time import sleep
# #import psutil
import os
# from StringIO import StringIO # Python2
import sub
import perf_dict
import json
import socket
import SocketServer
import multiprocessing
import Queue
import argparse
from version import __version__
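# Each guest request handled by MyTCPHandler below is a single JSON line. An
# illustrative example (field values are hypothetical; the keys are the ones
# read in MyTCPHandler.handle):
#
#     {"mac": "52:54:00:12:34:56", "cpu": 0, "param": "instructions,cycles"}
#
# 'mac' identifies the requesting KVM guest, 'cpu' selects which vCPU entry to
# look up, and 'param' is passed through to the perf tool as the event
# specification (its exact format is assumed here, not defined in this file).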
parser = argparse.ArgumentParser(description="Monitor Configuration")
parser.add_argument("--host", help='IP of the computer running the server; default 192.168.122.1', nargs='?', default="192.168.122.1")
parser.add_argument("--port", help='Port of the computer running the server; default 9999', nargs='?', default=9999, type=int)
parser.add_argument('-v',"--version", help='Show version and exit',action='store_true')
args = parser.parse_args()
if args.version is True:
print(__version__)
exit(0)
HOST, PORT = args.host, args.port
#mac = open('/sys/class/net/eth0/address').read()
"""
Build a dictionary for every KVM running on the host machine
"""
kvmInfo = perf_dict.perfDict()
"""
send the performance data back to the requesting guest-client
"""
def sender(intQueue, request):
try:
while(1):
data = intQueue.get(True)
print "sender: "+data
request.sendall(data)
except Queue.Empty:
pass
"""
Handler for the guest-client request
"""
class MyTCPHandler(SocketServer.StreamRequestHandler):
def handle(self):
try:
jmsg = self.rfile.readline().strip()
#print jmsg
# guest request is formatted to json.
# Parse the json message and get the guest mac address and the request details.
msg = json.loads(jmsg)
kvmID = msg['mac']
# print "{} wrote:".format(self.client_address[0])
# print kvmID
global kvmInfo
pid = kvmInfo[kvmID]['pid'][msg['cpu']]
occupy = kvmInfo[kvmID]['occupy'][msg['cpu']] #lock for multi-thread
perf_param = msg['param']
print "KVM pid {}, occupy {}, param {}".format(pid, occupy, perf_param)
# data for control information which needs to notify the client
data = ""
# check whether the requested Vcpu or the VM has already been monitored
if not occupy:
kvmInfo[kvmID]['occupy'][msg['cpu']] = True
ps = sub.perf_struct(pid, perf_param)
intQueue = multiprocessing.Queue()
# start the perf tool
ps.perf_stat_start()
# p1 read the real-time performance data to put it into a queue
p1= multiprocessing.Process(target=ps.perf_stat_interval, args=(intQueue,))
p1.start()
# p2 reports the performance data to guest-client
p2 = multiprocessing.Process(target=sender, args=(intQueue, self.request,))
p2.start()
print " waiting for perf end signal"
cmd = self.request.recv(1024).strip()
# get the "end" signal sent by the client
print cmd
# terminate both p1 and p2
p1.terminate()
# terminate the tool perf
ps.perf_stat_stop(intQueue)
kvmInfo[kvmID]['occupy'][msg['cpu']] = False
# after the tool perf receives the stop signal,
# it may generate some additional messages during its terminating process.
# we buffer these messages into queue
# sleep one second to get all the data delivered
sleep(1)
p2.terminate()
else:
data= "pmu is occupied, wait and retry"
print "close connection"
if len(data) > 0:
print data
self.request.sendall(data)
sleep(1)
self.request.close()
except KeyboardInterrupt:
print "keyboard interrupt, close connection"
self.request.close()
except Queue.Empty:
pass
if __name__ == "__main__":
try:
# HOST, PORT = "193.168.122.0", 9995
print "Start the server..."
# Create the server, binding to the ip and the port
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.ThreadingTCPServer((HOST, PORT), MyTCPHandler)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
except socket.error as (errno, string):
print("Error " + repr(errno) + ": " + string)
exit(0)
except KeyboardInterrupt:
print "keyboard interrupt, shutdown server"
server.server_close()
exit(0)
except Exception as e:
print(e)
|
|
import random
from dataclasses import dataclass
from logging import getLogger
from typing import Any, Dict, List, Optional, Union
import virtool.utils
from virtool.db.transforms import AbstractTransform
from virtool.db.utils import get_non_existent_ids, handle_exists, id_exists, oid_exists
from virtool.errors import DatabaseError
from virtool.groups.db import get_merged_permissions
from virtool.types import Document
from virtool.users.utils import (
check_legacy_password,
check_password,
generate_base_permissions,
limit_permissions,
)
from virtool.utils import base_processor
logger = getLogger(__name__)
PROJECTION = [
"_id",
"handle",
"administrator",
"force_reset",
"groups",
"last_password_change",
"permissions",
"primary_group",
]
ATTACH_PROJECTION = [
"administrator",
"handle",
]
@dataclass
class B2CUserAttributes:
"""
Class to store ID token claims from Azure AD B2C
"""
oid: str
display_name: str
given_name: str
family_name: str
class AttachUserTransform(AbstractTransform):
"""
Attaches more complete user data to a document with a `user.id` field.
"""
def __init__(self, db, ignore_errors: bool = False):
self._db = db
self._ignore_errors = ignore_errors
def _extract_user_id(self, document: Document) -> Optional[str]:
try:
user = document["user"]
try:
return user["id"]
except TypeError:
if isinstance(user, str):
return user
raise
except KeyError:
if not self._ignore_errors:
raise KeyError("Document needs a value at user or user.id")
return None
async def attach_one(self, document, prepared):
try:
user_data = document["user"]
except KeyError:
if self._ignore_errors:
return document
raise
if isinstance(user_data, str):
return {
**document,
"user": {
"id": user_data,
**prepared,
},
}
return {
**document,
"user": {
**document["user"],
**prepared,
},
}
async def attach_many(
self, documents: List[Document], prepared: Dict[int, Any]
) -> List[Document]:
return [
await self.attach_one(document, prepared[self._extract_user_id(document)])
for document in documents
]
async def prepare_one(self, document):
user_id = self._extract_user_id(document)
user_data = base_processor(
await self._db.users.find_one(user_id, ATTACH_PROJECTION)
)
if not user_data:
raise KeyError(f"Document contains non-existent user: {user_id}.")
return user_data
async def prepare_many(
self, documents: List[Document]
) -> Dict[Union[int, str], Any]:
user_ids = list({self._extract_user_id(document) for document in documents})
return {
document["_id"]: base_processor(document)
async for document in self._db.users.find(
{"_id": {"$in": user_ids}}, ATTACH_PROJECTION
)
}
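# A sketch of what the transform produces (field values are illustrative):
#
#     input document:  {"_id": "abc123", "user": {"id": "bob"}}
#     prepared data:   {"handle": "bob", "administrator": False, ...}
#     output document: {"_id": "abc123",
#                       "user": {"id": "bob", "handle": "bob", "administrator": False, ...}}
#
# A bare string value for "user" (e.g. "user": "bob") is normalized by
# attach_one() into the same nested form.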
async def extend_user(db, user: Dict[str, Any]) -> Dict[str, Any]:
user_id = user["id"]
user_data = base_processor(await db.users.find_one(user_id, ATTACH_PROJECTION))
extended = {
**user,
**user_data,
}
return extended
def compose_force_reset_update(force_reset: Optional[bool]) -> dict:
"""
Compose an update dict for the database given a `force_reset` value.
:param force_reset: a force reset value
:return: an update
"""
if force_reset is None:
return dict()
return {"force_reset": force_reset, "invalidate_sessions": True}
async def compose_groups_update(db, groups: Optional[list]) -> dict:
"""
Compose an update dict for updating the list of groups a user is a member of.
:param db: the application database client
:param groups: the groups to include in the user update
:return: an update
"""
if groups is None:
return dict()
non_existent_groups = await get_non_existent_ids(db.groups, groups)
if non_existent_groups:
raise DatabaseError("Non-existent groups: " + ", ".join(non_existent_groups))
update = {"groups": groups, "permissions": await get_merged_permissions(db, groups)}
return update
def compose_password_update(password: Optional[str]) -> dict:
"""
Compose an update dict for changing a user's password.
:param password: a new password
:return: an update
"""
if password is None:
return dict()
return {
"password": virtool.users.utils.hash_password(password),
"last_password_change": virtool.utils.timestamp(),
"invalidate_sessions": True,
}
async def compose_primary_group_update(
db, user_id: Optional[str], primary_group: Optional[str]
) -> dict:
"""
Compose an update dict for changing a user's `primary_group`.
:param db: the application database client
:param user_id: the id of the user being updated
:param primary_group: the primary group to set for the user
:return: an update
"""
if primary_group is None:
return dict()
if primary_group != "none":
if not await id_exists(db.groups, primary_group):
raise DatabaseError("Non-existent group: " + primary_group)
if not await is_member_of_group(db, user_id, primary_group):
raise DatabaseError("User is not member of group")
return {"primary_group": primary_group}
async def generate_handle(collection, given_name: str, family_name: str) -> str:
"""
Create handle for new B2C users in Virtool using values from ID token and random
integer.
:param collection: the mongo collection to check for existing usernames
:param given_name: user's first name collected from Azure AD B2C
:param family_name: user's last name collected from Azure AD B2C
:return: user handle created from B2C user info
"""
handle = f"{given_name}-{family_name}-{random.randint(1,100)}"
if await handle_exists(collection, handle):
return await generate_handle(collection, given_name, family_name)
return handle
async def create(
db,
password: Union[str, None],
handle: str,
force_reset: bool = True,
b2c_user_attributes: Optional[B2CUserAttributes] = None,
) -> Document:
"""
Insert a new user document into the database.
If Azure AD B2C information is given, add it to user document.
:param db: the application database client
:param handle: the requested handle for the user
:param password: a password
:param force_reset: force the user to reset password on next login
:param b2c_user_attributes: arguments gathered from Azure AD B2C ID token
:return: the user document
"""
user_id = await virtool.db.utils.get_new_id(db.users)
if await virtool.db.utils.handle_exists(
db.users, handle
) or await virtool.db.utils.id_exists(db.users, user_id):
raise DatabaseError("User already exists")
document = {
"_id": user_id,
"handle": handle,
"administrator": False,
"groups": list(),
"settings": {
"skip_quick_analyze_dialog": True,
"show_ids": True,
"show_versions": True,
"quick_analyze_workflow": "pathoscope_bowtie",
},
"permissions": generate_base_permissions(),
"primary_group": "",
# Should the user be forced to reset their password on their next login?
"force_reset": force_reset,
# A timestamp taken at the last password change.
"last_password_change": virtool.utils.timestamp(),
# Should all of the user's sessions be invalidated so that they are forced to
# login next time they download the client.
"invalidate_sessions": False,
}
if password is not None:
document.update({"password": virtool.users.utils.hash_password(password)})
else:
if await oid_exists(db.users, b2c_user_attributes.oid):
raise DatabaseError("User already exists")
document.update(
{
"b2c_oid": b2c_user_attributes.oid,
"b2c_display_name": b2c_user_attributes.display_name,
"b2c_given_name": b2c_user_attributes.given_name,
"b2c_family_name": b2c_user_attributes.family_name,
}
)
await db.users.insert_one(document)
return document
async def edit(
db,
user_id: str,
administrator: Optional[bool] = None,
force_reset: Optional[bool] = None,
groups: Optional[list] = None,
password: Optional[str] = None,
primary_group: Optional[str] = None,
) -> Document:
if not await id_exists(db.users, user_id):
raise DatabaseError("User does not exist")
update = dict()
if administrator is not None:
update["administrator"] = administrator
update.update(
{**compose_force_reset_update(force_reset), **compose_password_update(password)}
)
groups_update = await compose_groups_update(db, groups)
primary_group_update = await compose_primary_group_update(
db, user_id, primary_group
)
update.update({**groups_update, **primary_group_update})
if not update:
return await db.users.find_one({"_id": user_id})
document = await db.users.find_one_and_update({"_id": user_id}, {"$set": update})
await update_sessions_and_keys(
db,
user_id,
document["administrator"],
document["groups"],
document["permissions"],
)
return document
async def is_member_of_group(db, user_id: str, group_id: str) -> bool:
return bool(await db.users.count_documents({"_id": user_id, "groups": group_id}))
async def validate_credentials(db, user_id: str, password: str) -> bool:
"""
Check if the ``user_id`` and ``password`` are valid.
Returns ``True`` if the username exists and the password is correct. Returns
``False`` if the username does not exist or the password is incorrect.
:param db: a database client
:param user_id: the username to check.
:param password: the password to check.
:return: validation success
"""
document = await db.users.find_one(user_id, ["password", "salt"])
if not document:
return False
# Return True if the attempted password matches the stored password.
try:
if check_password(password, document["password"]):
return True
except TypeError:
pass
if "salt" in document and check_legacy_password(
password, document["salt"], document["password"]
):
return True
return False
async def update_sessions_and_keys(
db, user_id: str, administrator: bool, groups: list, permissions: dict
):
"""
:param db: a database client
:param user_id: the id of the user to update keys and session for
:param administrator: the administrator flag for the user
:param groups: an updated list of groups
:param permissions: an updated set of permissions derived from the updated groups
"""
find_query = {"user.id": user_id}
async for document in db.keys.find(find_query, ["permissions"]):
await db.keys.update_one(
{"_id": document["_id"]},
{
"$set": {
"administrator": administrator,
"groups": groups,
"permissions": limit_permissions(
document["permissions"], permissions
),
}
},
)
await db.sessions.update_many(
find_query,
{
"$set": {
"administrator": administrator,
"groups": groups,
"permissions": permissions,
}
},
)
async def find_or_create_b2c_user(
db, b2c_user_attributes: B2CUserAttributes
) -> Document:
"""
Search for an existing user using the OID value found in `b2c_user_attributes`.
If not found, create a new user with the OID value, a generated handle, and the
user attribute information.
:param b2c_user_attributes: User attributes collected from ID token claims
:param db: a database client
:return: found or created user document
"""
document = await db.users.find_one({"b2c_oid": b2c_user_attributes.oid})
if document is None:
handle = await virtool.users.db.generate_handle(
db.users, b2c_user_attributes.given_name, b2c_user_attributes.family_name
)
document = await virtool.users.db.create(
db,
password=None,
handle=handle,
force_reset=False,
b2c_user_attributes=b2c_user_attributes,
)
return document
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for handling events."""
import inspect
from core import jobs_registry
from core.domain import exp_domain
from core.domain import stats_domain
from core.domain import stats_services
from core.platform import models
import feconf
(stats_models, feedback_models) = models.Registry.import_models([
models.NAMES.statistics, models.NAMES.feedback])
taskqueue_services = models.Registry.import_taskqueue_services()
class BaseEventHandler(object):
"""Base class for event dispatchers."""
# A string denoting the type of the event. Should be specified by
# subclasses and considered immutable.
EVENT_TYPE = None
@classmethod
def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
"""Dispatch events asynchronously to continuous computation realtime
layers that are listening for them.
"""
taskqueue_services.defer(
jobs_registry.ContinuousComputationEventDispatcher.dispatch_event,
taskqueue_services.QUEUE_NAME_EVENTS, cls.EVENT_TYPE, *args,
**kwargs)
@classmethod
def _handle_event(cls, *args, **kwargs):
"""Perform in-request processing of an incoming event."""
raise NotImplementedError(
'Subclasses of BaseEventHandler should implement the '
'_handle_event() method, using explicit arguments '
'(no *args or **kwargs).')
@classmethod
def record(cls, *args, **kwargs):
"""Process incoming events.
Callers of event handlers should call this method, not _handle_event().
"""
cls._notify_continuous_computation_listeners_async(*args, **kwargs)
cls._handle_event(*args, **kwargs)
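# Sketch of the calling convention: concrete handlers implement _handle_event()
# with explicit arguments, while callers always go through record(). For
# example, for the StartExplorationEventHandler defined below:
#
#     StartExplorationEventHandler.record(
#         exp_id, exp_version, state_name, session_id, params, play_type)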
class StatsEventsHandler(BaseEventHandler):
"""Event handler for incremental update of analytics model using aggregated
stats data.
"""
EVENT_TYPE = feconf.EVENT_TYPE_ALL_STATS
@classmethod
def _handle_event(cls, exploration_id, exp_version, aggregated_stats):
taskqueue_services.defer(
stats_services.update_stats, taskqueue_services.QUEUE_NAME_STATS,
exploration_id, exp_version, aggregated_stats)
class AnswerSubmissionEventHandler(BaseEventHandler):
"""Event handler for recording answer submissions."""
EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED
@classmethod
def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
# Disable this method until we can deal with large answers, otherwise
# the data that is being placed on the task queue is too large.
pass
@classmethod
def _handle_event(
cls, exploration_id, exploration_version, state_name,
interaction_id, answer_group_index, rule_spec_index,
classification_categorization, session_id, time_spent_in_secs,
params, normalized_answer):
"""Records an event when an answer triggers a rule. The answer recorded
here is a Python-representation of the actual answer submitted by the
user.
"""
# TODO(sll): Escape these args?
stats_services.record_answer(
exploration_id, exploration_version, state_name, interaction_id,
stats_domain.SubmittedAnswer(
normalized_answer, interaction_id, answer_group_index,
rule_spec_index, classification_categorization, params,
session_id, time_spent_in_secs))
if feconf.ENABLE_NEW_STATS_FRAMEWORK:
feedback_is_useful = (
classification_categorization != (
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION))
stats_models.AnswerSubmittedEventLogEntryModel.create(
exploration_id, exploration_version, state_name, session_id,
time_spent_in_secs, feedback_is_useful)
class ExplorationActualStartEventHandler(BaseEventHandler):
"""Event handler for recording exploration actual start events."""
EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id):
stats_models.ExplorationActualStartEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id)
class SolutionHitEventHandler(BaseEventHandler):
"""Event handler for recording solution hit events."""
EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
stats_models.SolutionHitEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs)
class StartExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration start events."""
EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, params,
play_type):
stats_models.StartExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, params,
play_type)
class MaybeLeaveExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration leave events."""
EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, time_spent,
params, play_type):
stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, time_spent,
params, play_type)
class CompleteExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration completion events."""
EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, time_spent,
params, play_type):
stats_models.CompleteExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, time_spent,
params, play_type)
class RateExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration rating events."""
EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION
@classmethod
def _handle_event(cls, exploration_id, user_id, rating, old_rating):
stats_models.RateExplorationEventLogEntryModel.create(
exploration_id, user_id, rating, old_rating)
class StateHitEventHandler(BaseEventHandler):
"""Event handler for recording state hit events."""
EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT
# TODO(sll): remove params before sending this event to the jobs taskqueue
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
params, play_type):
stats_models.StateHitEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
params, play_type)
class StateCompleteEventHandler(BaseEventHandler):
"""Event handler for recording state complete events."""
EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
stats_models.StateCompleteEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs)
class FeedbackThreadCreatedEventHandler(BaseEventHandler):
"""Event handler for recording new feedback thread creation events."""
EVENT_TYPE = feconf.EVENT_TYPE_NEW_THREAD_CREATED
@classmethod
def _handle_event(cls, exp_id):
pass
class FeedbackThreadStatusChangedEventHandler(BaseEventHandler):
"""Event handler for recording reopening feedback thread events."""
EVENT_TYPE = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED
@classmethod
def _handle_event(cls, exp_id, old_status, new_status):
pass
class Registry(object):
"""Registry of event handlers."""
# Dict mapping event types to their classes.
_event_types_to_classes = {}
@classmethod
def _refresh_registry(cls):
"""Regenerates the event handler registry."""
cls._event_types_to_classes.clear()
# Find all subclasses of BaseEventHandler in the current module.
for obj_name, obj in globals().iteritems():
if inspect.isclass(obj) and issubclass(obj, BaseEventHandler):
if obj_name == 'BaseEventHandler':
continue
if not obj.EVENT_TYPE:
raise Exception(
'Event handler class %s does not specify an event '
'type' % obj_name)
elif obj.EVENT_TYPE in cls._event_types_to_classes:
raise Exception('Duplicate event type %s' % obj.EVENT_TYPE)
cls._event_types_to_classes[obj.EVENT_TYPE] = obj
@classmethod
def get_event_class_by_type(cls, event_type):
"""Gets an event handler class by its type.
Refreshes once if the event type is not found; subsequently, throws an
error.
"""
if event_type not in cls._event_types_to_classes:
cls._refresh_registry()
return cls._event_types_to_classes[event_type]
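# A sketch of dispatch via the registry (the event type constant is defined in
# feconf; the payload values are illustrative):
#
#     handler_cls = Registry.get_event_class_by_type(
#         feconf.EVENT_TYPE_RATE_EXPLORATION)
#     handler_cls.record(exploration_id, user_id, rating, old_rating)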
|
|
#!/usr/bin/env python
# Copyright (c) 2014 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
"""The EOS SDK Policy Router.
The policy router is an EOS SDK application usable on Arista switches
supporting policy based routing, such as the Arista 7500E with second
generation linecards.
This long-running agent monitors its JSON formatted configuration file
and when that file changes parses the configuration and if successful
changes policy based routing configuration to match the provided
config.
To operate, the 'pyinotify' module must be installed first, e.g.,:
# pip install pyinotify
To install this on a switch build an RPM also containing pyinotify and
install that using the EOS extension manager.
"""
import cjson
import collections
import datetime
import functools
import os
import sys
import pyinotify
import eossdk
class Error(Exception):
pass
class ConfigError(Error):
pass
PROTOCOLS = {'icmp': 1,
'ip': 4, # IP-in-IP
'tcp': 6,
'udp': 17,
'gre': 47,
'esp': 50,
'ah': 51,
'ospf': 89}
NEXTHOP_GROUP_TYPE = {'ipinip': eossdk.NEXTHOP_GROUP_IP_IN_IP,
'gre': eossdk.NEXTHOP_GROUP_GRE,
}
#'mpls': eossdk.NEXTHOP_GROUP_MPLS} # not yet supported
CONFIG_SECTIONS = frozenset(('match',
'classifier',
'action',
'policy',
'nexthop_group',
'interface_policy'))
MANDATORY_CONFIG_SECTIONS = CONFIG_SECTIONS - set(['nexthop_group'])
# Configuration data structures
Filter = collections.namedtuple(
'Filter', 'src_ip dst_ip src_mac dst_mac sport dport proto')
Match = collections.namedtuple(
'Match', 'type acl_name')
Classifier = collections.namedtuple(
'Classifier', 'matches')
Action = collections.namedtuple('Action', 'type nexthop_group nexthops mpls_labels')
Policy = collections.namedtuple('Policy', 'classifier actions')
Apply = collections.namedtuple('Apply', 'policy')
NexthopGroup = collections.namedtuple('NexthopGroup',
'type src_intf src_ips dst_ips mpls_labels')
Config = collections.namedtuple('Config',
'match classifiers actions nexthop_groups policy '
'interface_policy')
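# A sketch of the JSON configuration shape this agent consumes, derived from
# the load_* helpers below. All names, addresses and interfaces are
# illustrative, not required values:
#
# {
#   "match": {
#     "acl1": [{"src_ip": "10.0.0.0/24", "proto": "tcp",
#               "dport": {"op": "eq", "ports": [80]}}]
#   },
#   "classifier": {"class1": [{"match": "acl1"}]},
#   "action": {"act1": {"type": "nexthop_group", "nexthop_group": "ng1"}},
#   "nexthop_group": {"ng1": {"type": "gre", "src_intf": "Ethernet1",
#                             "dst_ips": ["192.0.2.1"]}},
#   "policy": {"pol1": {"classifier": "class1", "actions": ["act1"]}},
#   "interface_policy": {"Ethernet2": "pol1"}
# }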
def load_match(d):
acls = {}
for k, vs in sorted(d.iteritems()):
acls.setdefault(k, [])
for v in vs:
ace = Filter(src_ip=v.get('src_ip'),
dst_ip=v.get('dst_ip'),
src_mac=v.get('src_mac'),
dst_mac=v.get('dst_mac'),
sport=v.get('sport'),
dport=v.get('dport'),
proto=v.get('proto'))
acls[k].append(ace)
return acls
def load_classifier(d):
c = {}
for k, v in d.iteritems():
matches = [Match(type=None, acl_name=x.get('match')) for x in v]
c[k] = Classifier(matches=matches)
return c
def load_action(d):
a = {}
for k, v in d.iteritems():
a[k] = Action(v.get('type'),
nexthop_group=v.get('nexthop_group'),
nexthops=v.get('nexthops'),
mpls_labels=v.get('mpls_labels'))
return a
def load_policy(d):
p = {}
for k, v in d.iteritems():
p[k] = Policy(**v)
return p
def load_apply(d):
a = {}
for k, v in d.iteritems():
a[k] = Apply(policy=v)
return a
def load_nexthop_group(d):
g = {}
for k, v in d.iteritems():
g[k] = NexthopGroup(type=v.get('type', 'ipinip'),
src_intf=v.get('src_intf'),
src_ips=v.get('src_ips', []),
dst_ips=v.get('dst_ips', []),
mpls_labels=v.get('mpls_labels', []))
return g
def load_config(d):
for section in MANDATORY_CONFIG_SECTIONS:
if d.get(section) is None:
raise ValueError('Configuration missing %r section' % section)
config = Config(
match=load_match(d['match']),
policy=load_policy(d['policy']),
interface_policy=load_apply(d['interface_policy']),
actions=load_action(d['action']),
nexthop_groups=load_nexthop_group(d.get('nexthop_group', {})),
classifiers=load_classifier(d['classifier']))
return config
def load_config_file(filename):
config = {}
with open(filename) as f:
try:
config = load_config(cjson.decode(f.read()))
except (TypeError, ValueError, cjson.Error) as e:
sys.stderr.write('error reading config: %s\n' % e)
return config
def load_config_file_obj(f):
try:
return load_config(cjson.decode(f.read()))
except (TypeError, ValueError, cjson.Error) as e:
sys.stderr.write('error reading config: %s\n' % e)
return None
class PolicyRouter(object):
"""The policy router state and context."""
def __init__(self, acl_mgr, agent_mgr, class_map_mgr,
policy_map_mgr, intf_mgr, nexthop_group_mgr, tag='PR'):
self.config_ = None
self.t_ = tag or ''
self.built_ = False
self.acl_mgr = acl_mgr
self.agent_mgr = agent_mgr
self.class_map_mgr = class_map_mgr
self.policy_map_mgr = policy_map_mgr
self.intf_mgr = intf_mgr
self.nexthop_group_mgr = nexthop_group_mgr
self.actions_ = {}
@property
def config(self):
return self.config_
def config_is(self, value):
if value != self.config_:
self.config_ = value
if self.config_:
self.buildPolicy()
def _buildAcls(self):
for aclname, aclrules in self.config.match.iteritems():
key = eossdk.AclKey(aclname, eossdk.ACL_TYPE_IPV4)
# todo support ipv6 also
for i, rule in enumerate(aclrules):
aclRule = eossdk.AclRuleIp()
if rule.proto:
pr = PROTOCOLS.get(rule.proto.lower())
if pr:
aclRule.ip_protocol_is(pr)
else:
sys.stderr.write('Invalid protocol name "%s"\n' % rule.proto)
if rule.src_ip:
try:
srcPfx = eossdk.IpPrefix(rule.src_ip)
addr = eossdk.IpAddrMask(srcPfx.network(),
srcPfx.prefix_length())
aclRule.source_addr_is(addr)
except eossdk.Error:
sys.stderr.write('bad IP address: %s\n' % rule.src_ip)
continue
if rule.dst_ip:
try:
dstPfx = eossdk.IpPrefix(rule.dst_ip)
addr = eossdk.IpAddrMask(dstPfx.network(),
dstPfx.prefix_length())
aclRule.destination_addr_is(addr)
except eossdk.Error:
sys.stderr.write('bad IP address: %s\n' % rule.dst_ip)
continue
if rule.sport:
try:
spec = self._buildPortSpec(rule.sport)
except ConfigError as e:
sys.stderr.write('Invalid port spec %r: %s\n' % (rule.sport, e))
else:
aclRule.source_port_is(spec)
if rule.dport:
try:
spec = self._buildPortSpec(rule.dport)
except ConfigError as e:
sys.stderr.write('Invalid port spec %r: %s\n' % (rule.dport, e))
else:
aclRule.destination_port_is(spec)
self.acl_mgr.acl_rule_set(key, i, aclRule)
self.acl_mgr.acl_commit()
def _buildPortSpec(self, portspec):
op = portspec.get('op', 'eq').lower() # default to port equals
ports = portspec.get('ports', [])
if op == 'eq':
return eossdk.AclPortSpecEq(ports)
elif op == 'neq':
return eossdk.AclPortSpecNeq(ports)
elif op == 'range':
if len(ports) != 2:
raise ConfigError('Must provide exactly two ports for "range"')
return eossdk.AclPortSpecRange(ports[0], ports[1])
elif op == 'gt':
if len(ports) != 1:
raise ConfigError('Must provide only one port for "gt"')
return eossdk.AclPortSpecGt(ports[0])
elif op == 'lt':
if len(ports) != 1:
raise ConfigError('Must provide only one port for "lt"')
return eossdk.AclPortSpecLt(ports[0])
else:
raise ConfigError('Unknown port match operation "%s"' % op)
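# Port specs in the 'match' section are dictionaries interpreted by
# _buildPortSpec above, e.g. (illustrative values):
#
#     {"op": "eq", "ports": [80, 443]}
#     {"op": "range", "ports": [1024, 2048]}
#     {"op": "gt", "ports": [1023]}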
def _buildClassMaps(self):
classifiers = self.config_.classifiers
for name, classifier in classifiers.iteritems():
key = eossdk.PolicyMapKey(name, eossdk.POLICY_FEATURE_PBR)
class_map = eossdk.ClassMap(key)
for rule_index, match in enumerate(classifier.matches):
print 'Adding to class map:', name, 'seq:', str(rule_index + 1), match
rule_key = eossdk.AclKey(match.acl_name, eossdk.ACL_TYPE_IPV4)
rule = eossdk.ClassMapRule(rule_key)
# Add the rule to the class map at the next sequence number
class_map.rule_set(rule_index + 1, rule)
self.class_map_mgr.class_map_is(class_map)
cm = self.class_map_mgr.class_map(key)
print 'Set class map:', name, 'now with', len(cm.rules()), 'rules'
def _buildActions(self):
for name, action in self.config_.actions.iteritems():
if action.type == 'drop':
act = eossdk.PolicyMapAction(eossdk.POLICY_ACTION_DROP)
elif action.type == 'nexthop_group' and action.nexthop_group:
act = eossdk.PolicyMapAction(eossdk.POLICY_ACTION_NEXTHOP_GROUP)
act.nexthop_group_name_is(action.nexthop_group)
elif action.type == 'nexthop' and action.nexthops:
act = eossdk.PolicyMapAction(eossdk.POLICY_ACTION_NEXTHOP)
for nh in action.nexthops:
hop = get_ip_addr(nh)
if hop is not None:
act.nexthop_set(hop)
else:
raise ConfigError('Action type="%s" not supported' % action.type)
self.actions_[name] = act
def _buildNexthopGroups(self):
groups = self.config_.nexthop_groups
for name, data in groups.iteritems():
t = data.type.lower()
if t not in NEXTHOP_GROUP_TYPE:
sys.stderr.write('Unknown nexthop group type="%s"\n' % data.type)
continue
group = eossdk.NexthopGroup(name, NEXTHOP_GROUP_TYPE.get(t))
# Set common parameters
for i, dst in enumerate(data.dst_ips):
ip = get_ip_addr(dst)
if ip is not None:
print 'Adding destination IP:', dst
group.destination_ip_set(i, ip)
if t == 'ipinip' or t == 'gre':
if data.src_intf:
if self.intf_mgr.exists(eossdk.IntfId(data.src_intf)):
group.source_intf_is(eossdk.IntfId(data.src_intf))
elif data.src_ips:
pass # not yet supported
elif t == 'mpls':
sys.stderr.write('MPLS nexthop-groups are not yet supported\n')
# Set the nexthop group
print 'Setting nexthop group:', name
self.nexthop_group_mgr.nexthop_group_set(group)
def _buildPolicyMaps(self):
policies = self.config_.policy
for name, data in policies.iteritems():
# add the class map
rule_key = eossdk.PolicyMapKey(data.classifier,
eossdk.POLICY_FEATURE_PBR)
rule = eossdk.PolicyMapRule(rule_key)
for actionName in data.actions:
# raises KeyError on unknown action names
action = self.actions_[actionName]
rule.action_set(action)
key = eossdk.PolicyMapKey(name, eossdk.POLICY_FEATURE_PBR)
policy_map = eossdk.PolicyMap(key)
policy_map.rule_set(1, rule)
self.policy_map_mgr.policy_map_is(policy_map)
def _applyToInterfaces(self):
interface_policy = self.config_.interface_policy
for intf_name, data in interface_policy.iteritems():
policy_map_key = eossdk.PolicyMapKey(data.policy, eossdk.POLICY_FEATURE_PBR)
intf_id = eossdk.IntfId(intf_name)
if self.intf_mgr.exists(intf_id):
print 'Interface %s exists, applying policy' % intf_id.to_string()
self.policy_map_mgr.policy_map_apply(
policy_map_key, intf_id, eossdk.ACL_IN, True)
else:
print 'Interface %s does not exist' % intf_id.to_string()
print 'Finished applying policy'
def buildPolicy(self):
assert self.config_
self.built_ = False
print 'Building ACLs'
self._buildAcls()
print 'Building class maps'
self._buildClassMaps()
print 'Building actions for policy maps'
self._buildActions()
print 'Building nexthop groups'
self._buildNexthopGroups()
print 'Building policy maps'
self._buildPolicyMaps()
print 'Applying policy to interfaces'
self._applyToInterfaces()
self.built_ = True
print 'Finished building policy'
@property
def built(self):
return self.built_
def applyPolicy(self):
assert self.built_
self._applyToInterfaces()
class InotifyPoller(eossdk.TimeoutHandler):
def __init__(self, sdk, config_file, policy_handler, poll_interval=0.5):
self.config_file_ = config_file
self.sdk_ = sdk
self.policy_handler_ = policy_handler
self.poll_interval_ = poll_interval
self.wm_ = pyinotify.WatchManager()
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE | pyinotify.IN_DELETE
handler = functools.partial(InotifyHandler, handler=policy_handler)
# Allow coalescing, so that delete/recreate (as opposed to modify) doesn't
# cause us to delete the policy.
self.notifier_ = pyinotify.Notifier(self.wm_, handler, timeout=10)
self.notifier_.coalesce_events()
self.watch_ = self.wm_.watch_transient_file(self.config_file_, mask, handler)
eossdk.TimeoutHandler.__init__(self, self.sdk_.get_timeout_mgr())
self.timeout_time_is(eossdk.now())
def poll(self):
self.notifier_.process_events()
while self.notifier_.check_events():
self.notifier_.read_events()
self.notifier_.process_events()
def on_timeout(self):
self.poll()
self.timeout_time_is(eossdk.now() + self.poll_interval_)
def cancel(self):
self.timeout_time_is(eossdk.never)
class PolicyHandler(eossdk.AgentHandler, eossdk.PolicyMapHandler, eossdk.AclHandler):
def __init__(self, sdk, config_file=None):
self.config_file_ = config_file
self.sdk_ = sdk
self.config_ = None
self.acl_mgr = sdk.get_acl_mgr()
self.agent_mgr = sdk.get_agent_mgr()
self.policy_map_mgr = sdk.get_policy_map_mgr()
self.router_ = PolicyRouter(
self.acl_mgr, self.agent_mgr, sdk.get_class_map_mgr(),
self.policy_map_mgr, sdk.get_intf_mgr(), sdk.get_nexthop_group_mgr())
self.timeout_ = None
self.watches_ = frozenset()
eossdk.PolicyMapHandler.__init__(self, self.policy_map_mgr)
eossdk.AclHandler.__init__(self, self.acl_mgr)
eossdk.AgentHandler.__init__(self, self.agent_mgr)
@property
def config(self):
return self.config_
def config_is(self, config):
self.config_ = config
self.router_.config_is(config)
self.watch_policy()
def watch_policy(self):
print 'Removing all watches for %s' % self.watches_
for name in self.watches_:
self.watch_policy_map(
eossdk.PolicyMapKey(name, eossdk.POLICY_FEATURE_PBR), False)
self.watches_ = frozenset(self.config_.policy.iterkeys())
print 'Adding new watches for %s' % self.config_.policy.keys()
for name in self.config_.policy:
self.watch_policy_map(
eossdk.PolicyMapKey(name, eossdk.POLICY_FEATURE_PBR), True)
def on_initialized(self):
print self.__class__.__name__, 'was initialized by the SDK'
print 'Loading initial config'
self.on_agent_option('config_file',
self.agent_mgr.agent_option('config_file')
or self.config_file_)
print 'Finished loading initial config'
def on_agent_option(self, name, value):
if name == 'config_file':
config = load_config_file(value)
if config != self.config:
self.agent_mgr.status_set('config_changed', datetime.datetime.today())
self.config_is(config)
if self.config_file_ != value:
if self.timeout_:
self.timeout_.cancel()
print 'Starting Inotify notifier'
self.config_file_ = value
self.timeout_ = InotifyPoller(self.sdk_, self.config_file_, self)
def on_policy_map_sync(self, key):
self.agent_mgr.status_set('last_policy_map_sync_state', 'PASS')
self.agent_mgr.status_set('last_policy_map_sync_at',
str(datetime.datetime.today()))
self.agent_mgr.status_set('last_policy_map_sync_key', str(key))
def on_policy_map_sync_fail(self, key, message):
self.agent_mgr.status_set('last_policy_map_sync_state', 'FAIL')
self.agent_mgr.status_set('last_policy_map_sync_key', str(key))
self.agent_mgr.status_set('last_policy_map_sync_error_message', message)
def on_agent_enabled(self, enabled):
self.agent_mgr.status_set('enabled', enabled)
def on_acl_sync(self):
self.agent_mgr.status_set('last_acl_sync', str(datetime.datetime.today()))
def on_acl_sync_fail(self, linecard, message):
self.agent_mgr.status_set('last_acl_sync_err_linecard', linecard)
self.agent_mgr.status_set('last_acl_sync_err_message', message)
class InotifyHandler(pyinotify.ProcessEvent):
"""Handles inotify events."""
def process_IN_CREATE(self, event):
print 'Config file created:', event.pathname
config = load_config_file(event.pathname)
if config:
self.handler_.config_is(config)
def process_IN_MODIFY(self, event):
config = load_config_file(event.pathname)
if config == self.handler_.config:
print 'Config file modified but not changed: ', event.pathname
else:
print 'Config file updated:', event.pathname
self.handler_.config_is(config)
def my_init(self, **kwargs):
self.handler_ = kwargs['handler']
def get_ip_addr(ip_addr):
try:
return eossdk.IpAddr(ip_addr)
except eossdk.Error as e:
sys.stderr.write('Invalid IP address: %s (%s)' % (ip_addr, e))
def main():
# We use `print' for logging, so unbuffer stdout to make output show up in
# the agent logs immediately.
os.environ['PYTHONUNBUFFERED'] = '1' # TODO: Use tracing instead.
# Config file path has to be provided by the environment variable
filename = os.environ.get('POLICY_ROUTER_CONFIG')
# Obtain a reference to the EOS SDK
sdk = eossdk.Sdk()
# Instantiate the policy router application
_ = PolicyHandler(sdk, filename)
# Run the agent until terminated by a signal
sdk.main_loop(['PolicyRouter'])
if __name__ == '__main__':
sys.exit(main())
|
|
from datetime import datetime
import pandas as pd
import numpy as np
from ..timeframe import merge_timeframes, TimeFrame
from disaggregator import Disaggregator
from matplotlib import pyplot as plt
from datetime import timedelta
from scipy.stats import poisson, norm
from sklearn import mixture
class MLE(Disaggregator):
"""
Disaggregation of a single appliance based on its features and
using the maximum likelihood of all features.
Attributes
----------
appliance: str
Name of the appliance
stats: list of dicts
One dict per feature with:
units: tuple
For instance: ('power','active')
resistive: boolean
To decide if 'apparent' == 'active'
thDelta: int
Threshold for delta values of the power. Used in the train_on_chunk method
thLikelihood: int
Threshold for the maximum likelihood
sample_period: str
For resampling in training and disaggregate methods
sample_method: str
Pandas method for resampling
onpower: dict
{'name':str, 'gmm': str, 'model': sklearn model}
offpower: dict
{'name':str, 'gmm': str, 'model': sklearn model}
duration: dict
{'name':str, 'gmm': str, 'model': sklearn model}
onpower_train: pandas.Dataframe()
Training samples of onpower
offpower_train: pandas.Dataframe()
Training samples of offpower
duration_train: pandas.Dataframe()
Training samples of duration
powerNoise: int
For the disaggregate_chunk method, minimum delta value for an event to be
considered; smaller deltas are treated as noise.
powerPair: int
For the disaggregate_chunk method, max delta value difference between
onpower and offpower
timeWindow: int
For the disaggregate_chunk method, a time frame to speed up
disaggregate_chunk method.
TODO:
-----
* Build a method for choosing thLikelihood automatically based on its
optimization using ROC curve.
* Method for measuring ROC curve.
"""
def __init__(self):
"""
Initialise the model with default values
"""
super(MLE, self).__init__()
# Metadata
self.appliance = None
self.stats = []
self.units = None
self.resistive = False
self.thDelta = 0
self.thLikelihood = 0
self.sample_period = None
self.sampling_method = None
# FEATURES:
self.onpower = {'name': 'gmm', 'model': mixture.GMM(n_components=1)}
self.offpower = {'name': 'gmm', 'model': mixture.GMM(n_components=1)}
self.duration = {'name': 'poisson', 'model': poisson(0)}
# Trainings:
self.onpower_train = pd.DataFrame(columns=['onpower'])
self.offpower_train = pd.DataFrame(columns=['offpower'])
self.duration_train = pd.DataFrame(columns=['duration'])
# Constrains
self.powerNoise = 0 # Background noise in the main
self.powerPair = 0 # Max diff between onpower and offpower
self.timeWindow = 0 # To avoid high computation
def __retrain(self, feature, feature_train):
print "Training " + feature_train.columns[0]
mu, std = norm.fit(feature_train)
feature['model'] = norm(loc=mu, scale=std)
'''if feature['name'] == 'gmm':
feature['model'].fit(feature_train)
elif feature['name'] == 'norm':
mu, std = norm.fit(feature_train)
feature['model'] = norm(loc=mu, scale=std)
elif feature['name'] == 'poisson':
self.onpower['model'] = poisson(feature_train.mean())
else:
raise NameError(
"Name of the model for " +
str(feature_train.columns[0]) +
" unknown or not implemented") '''
def __physical_quantity(self, chunk):
if not self.resistive:
print "Checking units"
units_mismatched = True
for name in chunk.columns:
if name == self.units:
units = name
units_mismatched = False
if units_mismatched:
stringError = self.appliance + " cannot be disaggregated. " + self.appliance + \
" is a non-resistive element and the units mismatch: disaggregated data is in " + \
str(self.units) + \
" and aggregated data is in " + str(list(chunk.columns))
raise ValueError(stringError)
else:
units = chunk.columns[0]
return units
def __pdf(self, feature, delta):
if feature['name'] == 'norm':
score = feature['model'].pdf(delta)
elif feature['name'] == 'gmm':
#score = np.exp(feature['model'].score([delta]))[0]
score = feature['model'].pdf(delta)
elif feature['name'] == 'poisson':
# Decimal values produce odd values in poisson (bug)
delta = np.round(delta)
#score = feature['model'].pmf(delta)
score = feature['model'].pdf(delta)
else:
raise AttributeError("Wrong model for " + feature['name'] +
". It must be: gmm, norm or poisson")
return score
def __pdf2(self, feature, delta):
if feature['name'] == 'norm':
score = feature['model'].pdf(delta)
elif feature['name'] == 'gmm':
score = np.exp(feature['model'].score([delta]))
elif feature['name'] == 'poisson':
# Decimal values produce odd values in poisson (bug)
delta = np.round(delta)
score = feature['model'].pmf(delta)
else:
raise AttributeError("Wrong model for " + feature['name'] +
". It must be: gmm, norm or poisson")
return score
def update(self, **kwargs):
"""
This method will update attributes of the model passed by kwargs.
Parameters
----------
kwargs : key word arguments
Notes
-----
"""
print "Updating model"
print kwargs
for key in kwargs:
setattr(self, key, kwargs[key])
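# A sketch of a typical configuration call via update(); the values below are
# illustrative, not defaults required by the model:
#
#     mle = MLE()
#     mle.update(appliance='kettle', units=('power', 'active'), resistive=True,
#                thDelta=1000, thLikelihood=1e-10, sample_period='10s',
#                sampling_method='mean', powerNoise=50, powerPair=100,
#                timeWindow=400)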
def train(self, metergroup):
"""
Train using ML.
Call disaggregate_chunk method
Parameters
----------
metergroup : a nilmtk.MeterGroup object
Notes
-----
* Initialises "stats" and the feature training data ("*_train") on the model.
* Instance is initialised to 1. Use meter.instance to provide more
information (TODO)
"""
# Inizialise stats and training data:
self.stats = []
self.onpower_train = pd.DataFrame(columns=['onpower'])
self.offpower_train = pd.DataFrame(columns=['offpower'])
self.duration_train = pd.DataFrame(columns=['duration'])
# Calling train_on_chunk by meter:
instance = 1 # initial instance.
for meter in metergroup.meters:
for chunk in meter.power_series():
if chunk.empty:
print "Chunk empty"
else:
print "Training on chunk"
self.train_on_chunk(pd.DataFrame(chunk.resample(
self.sample_period,
how=self.sampling_method)),
meter
)
instance += 1
def train_on_chunk(self, chunk, meter):
"""
Extracts features from chunk, concatenates feature_train
(onpower_train, offpower_train and duration_train) with new features
and retrains feature
models.
Updates stats attribute.
Parameters
----------
chunk : pd.DataFrame where each column represents a disaggregated
meter : ElecMeter for this chunk
Notes
-----
* Disaggregates only the selected appliance.(TODO: Disaggregates many)
"""
# EXTRACT FEATURES:
# find units:
self.__setattr__('units', chunk.columns[0])
# Loading threshold for getting events:
thDelta = getattr(self, 'thDelta')
chunk.index.name = 'date_time'
# To prevent learning many samples in the middle of an edge:
chunk.ix[:, 0][chunk.ix[:, 0] < thDelta] = 0
# Learning edges
chunk['delta'] = chunk.ix[:, 0].diff()
chunk.delta.fillna(0, inplace=True)
edges = chunk[np.abs(chunk['delta']) > thDelta].delta
# Pairing on/off events
#print(chunk)
if len(edges) > 1:
offpower = edges[edges.apply(np.sign).diff() == -2]
onpower = edges[edges.apply(np.sign).diff(-1) == 2]
duration = offpower.reset_index().date_time - \
onpower.reset_index().date_time
duration = duration.astype('timedelta64[s]')
# Set consistent index for concatenation:
onpower = pd.DataFrame(onpower).reset_index(drop=True)
onpower.columns = ['onpower']
offpower = pd.DataFrame(offpower).reset_index(drop=True)
offpower.columns = ['offpower']
duration = pd.DataFrame(duration).reset_index(drop=True)
duration.columns = ['duration']
# Len of samples:
print "Samples of onpower: " + str(len(onpower))
print "Samples of offpower: " + str(len(offpower))
print "Samples of duration: " + str(len(duration))
number_of_events = len(onpower)
# Features (concatenation)
self.onpower_train = pd.concat(
[self.onpower_train, onpower]).reset_index(drop=True)
self.offpower_train = pd.concat(
[self.offpower_train, offpower]).reset_index(drop=True)
self.duration_train = pd.concat(
[self.duration_train, duration]).reset_index(drop=True)
else:
number_of_events = 0
print """WARNING: No paired events found on this chunk.
Is thDelta too high?"""
self.duration_train = self.duration_train[self.duration_train.duration<400]
# RE-TRAIN FEATURE MODELS:
self.__retrain(self.onpower, self.onpower_train)
self.__retrain(self.offpower, self.offpower_train)
self.__retrain(self.duration, self.duration_train)
# UPDATE STATS:
stat_dict = {'appliance': meter.identifier[0],
'instance': meter.identifier[1],
'Nevents': number_of_events}
instanceFound = False
if len(self.stats) == 0:
self.stats.append(stat_dict)
else:
for stat in self.stats:
if ((stat['appliance'] == stat_dict['appliance']) and
(stat['instance'] == stat_dict['instance'])):
index = self.stats.index(stat)
self.stats[index]['Nevents'] = self.stats[
index]['Nevents'] + number_of_events
instanceFound = True
if not instanceFound:
self.stats.append(stat_dict)
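# A small worked example of the pairing logic above (illustrative numbers):
# with thDelta = 100 and a resampled power series 0, 0, 2000, 2000, 0, the
# deltas are 0, 0, +2000, 0, -2000. Both +/-2000 edges pass the threshold;
# the +2000 edge (sign change towards the next edge) becomes an onpower
# sample, the -2000 edge (sign change from the previous edge) becomes an
# offpower sample, and their timestamp difference in seconds becomes a
# duration sample.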
def disaggregate(self, mains, output_datastore):
"""
Passes each chunk from mains generator to disaggregate_chunk()
and passes the output to _write_disaggregated_chunk_to_datastore()
Will have a default implementation in super class.
Can be overridden for more simple in-memory disaggregation,
or more complex out-of-core disaggregation.
Parameters
----------
mains : nilmtk.ElecMeter (single-phase) or nilmtk.MeterGroup (multi-phase)
output_datastore : instance of nilmtk.DataStore or str of datastore location
"""
building_path = '/building{}'.format(mains.building())
# only writes one appliance and meter per building
meter_instance = 2
mains_data_location = '{}/elec/meter1'.format(building_path)
#dis_main = pd.DataFrame()
chunk_number = 0
timeframes = []
for chunk in mains.power_series():
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
cols = pd.MultiIndex.from_tuples([chunk.name])
dis_chunk = self.disaggregate_chunk(
pd.DataFrame(chunk.resample(self.sample_period, how=self.sampling_method)))
#dis_main = pd.concat([dis_main, dis_chunk])
chunk_number += 1
print str(chunk_number) + " chunks disaggregated"
# Write appliance data to disag output
key = '{}/elec/meter{}'.format(building_path, meter_instance)
df = pd.DataFrame(
dis_chunk.values, index=dis_chunk.index,
columns=cols)
output_datastore.append(key, df)
# Copy mains data to disag output
output_datastore.append(key=mains_data_location,
value=pd.DataFrame(chunk, columns=cols))
# Saving output datastore:
#output_datastore.append(key=mains.key, value=dis_main)
##################################
# Add metadata to output_datastore
# TODO: `preprocessing_applied` for all meters
# TODO: split this metadata code into a separate function
# TODO: submeter measurement should probably be the mains
# measurement we used to train on, not the mains measurement.
date_now = datetime.now().isoformat().split('.')[0]
output_name = 'NILMTK_MLE_' + date_now
resample_seconds = 10
mains_data_location = '{}/elec/meter1'.format(building_path)
# DataSet and MeterDevice metadata:
meter_devices = {
'MLE': {
'model': 'MLE',
'sample_period': resample_seconds,
'max_sample_period': resample_seconds,
'measurements': [{
'physical_quantity': measurement[0],
'type': measurement[1]
}]
},
'mains': {
'model': 'mains',
'sample_period': resample_seconds,
'max_sample_period': resample_seconds,
'measurements': [{
'physical_quantity': measurement[0],
'type': measurement[1]
}]
}
}
merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)
total_timeframe = TimeFrame(merged_timeframes[0].start,
merged_timeframes[-1].end)
dataset_metadata = {'name': output_name, 'date': date_now,
'meter_devices': meter_devices,
'timeframe': total_timeframe.to_dict()}
output_datastore.save_metadata('/', dataset_metadata)
# Building metadata
# Mains meter:
elec_meters = {
1: {
'device_model': 'mains',
'site_meter': True,
'data_location': mains_data_location,
'preprocessing_applied': {}, # TODO
'statistics': {
'timeframe': total_timeframe.to_dict()
}
}
}
# Appliances and submeters:
appliances = []
appliance = {
'meters': [meter_instance],
'type': 'kettle',
'instance': 1
# TODO this `instance` will only be correct when the
# model is trained on the same house as it is tested on.
# https://github.com/nilmtk/nilmtk/issues/194
}
appliances.append(appliance)
elec_meters.update({
meter_instance: {
'device_model': 'MLE',
'submeter_of': 1,
'data_location': ('{}/elec/meter{}'
.format(building_path, meter_instance)),
'preprocessing_applied': {}, # TODO
'statistics': {
'timeframe': total_timeframe.to_dict()
}
}
})
elec_meters[meter_instance]['name'] = 'kettle'
building_metadata = {
'instance': mains.building(),
'elec_meters': elec_meters,
'appliances': appliances
}
output_datastore.save_metadata(building_path, building_metadata)
def disaggregate_chunk(self, chunk):
"""
Checks units.
Disaggregates "chunk" with MaximumLikelihood algorithm.
Optimization:
Filters out events whose absolute power delta is below powerNoise.
Pairs on/off events whose delta magnitudes differ by less than powerPair.
Restricts the search for a matching off-event to a timeWindow-second window.
Parameters
----------
chunk : pd.DataFrame (in NILMTK format)
Returns
-------
chunk : pd.DataFrame where each column represents a disaggregated appliance
Notes
-----
* Disaggregation is not yet validated (TODO: verify the process against the ground truth).
* Disaggregates only the selected appliance (TODO: disaggregate several appliances).
"""
# A resistive element has active power equal to apparent power.
# Checking power units.
units = self.__physical_quantity(chunk)
# EVENTS OUT OF THE CHUNK:
# Delta values:
column_name = 'diff_' + units[1]
chunk[column_name] = chunk.loc[:, units].diff()
# Filter the noise.
chunk['onpower'] = (chunk[column_name] > self.powerNoise)
chunk['offpower'] = (chunk[column_name] < -self.powerNoise)
events = chunk[(chunk.onpower == True) | (chunk.offpower == True)]
detection_list = []
singleOnevent = 0
# Max Likelihood algorithm (optimized):
for onevent in events[events.onpower == True].iterrows():
# onTime = onevent[0]
# deltaOn = onevent[1][1]
# windowning:
offevents = events[(events.offpower == True) & (events.index > onevent[0]) & (
events.index < onevent[0] + timedelta(seconds=self.timeWindow))]
# Filter paired events:
offevents = offevents[
abs(onevent[1][1] - offevents[column_name].abs()) < self.powerPair]
# Max likelihood computation:
if not offevents.empty:
# pon = self.__pdf(self.onpower, onevent[1][1])
for offevent in offevents.iterrows():
# offTime = offevent[0]
# deltaOff = offevent[1][1]
# poff = self.__pdf(self.offpower, offevent[1][1])
# duration = offevent[0] - onTime
# pduration = self.__pdf(self.duration, (offevent[0] - onTime).total_seconds())
likelihood = self.__pdf(self.onpower, onevent[1][1]) * \
self.__pdf(self.offpower, offevent[1][1]) * \
self.__pdf(self.duration, (offevent[0] - \
onevent[0]).total_seconds())
detection_list.append(
{'likelihood': likelihood, 'onTime': onevent[0],
'offTime': offevent[0], 'deltaOn': onevent[1][1]})
else:
singleOnevent += 1
# Passing detections to a pandas.DataFrame
detections = pd.DataFrame(
columns=('onTime', 'offTime', 'likelihood', 'deltaOn'))
for i in range(len(detection_list)):
detections.loc[i] = [detection_list[i]['onTime'], detection_list[i][
'offTime'], detection_list[i]['likelihood'], detection_list[i]['deltaOn']]
detections = detections[detections.likelihood >= self.thLikelihood]
# Constructing dis_chunk (power of disaggregated appliance)
dis_chunk = pd.DataFrame(
index=chunk.index, columns=[str(units[0]) + '_' + str(units[1])])
dis_chunk.fillna(0, inplace=True)
# Rule out overlapping detections, ordering by likelihood value.
detections = detections.sort('likelihood', ascending=False)
for row in detections.iterrows():
# onTime = row[1][0] offTime = row[1][1] deltaOn = row[1][3]
if ((dis_chunk[(dis_chunk.index >= row[1][0]) &
(dis_chunk.index < row[1][1])].sum().values[0]) == 0):
# delta = chunk[chunk.index == onTime][column_name].values[0]
dis_chunk[(dis_chunk.index >= row[1][0]) & (
dis_chunk.index < row[1][1])] = row[1][3]
# Stat information:
print str(len(events)) + " events found."
print str(len(events[events.onpower == True])) + " onEvents found"
print str(singleOnevent) + " onEvents not paired."
return dis_chunk
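# Worked example (illustrative numbers only): an on-event of +2000 W at
# 10:00:00 paired with an off-event of -1995 W at 10:03:20 has a duration of
# 200 s, and its score is
#     likelihood = pdf(onpower, 2000) * pdf(offpower, -1995) * pdf(duration, 200)
# Only pairs whose likelihood reaches thLikelihood are kept, and overlapping
# detections are resolved in favour of the higher-scoring pair.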
def no_overfitting(self):
"""
Crops the training features (onpower_train, offpower_train and duration_train)
so that each appliance instance (of the same appliance model) contributes the
same number of samples, avoiding overfitting to the instance with most samples.
Updates the stats attribute.
Re-trains the feature models.
"""
# The smallest non-zero instance length becomes the maximum length kept per instance
train_len = np.array([st['Nevents'] for st in self.stats])
max_len = train_len[train_len != 0].min()
# CROPS FEATURE SAMPLES
onpower_train = pd.DataFrame()
offpower_train = pd.DataFrame()
duration_train = pd.DataFrame()
start = 0
end = 0
for ind in np.arange(len(self.stats)):
if self.stats[ind]['Nevents'] != 0:
if ind == 0:
start = 0
else:
start = end
end += self.stats[ind]['Nevents']
aux = self.onpower_train[start:end]
aux = aux[:max_len]
onpower_train = pd.concat([onpower_train, aux])
aux = self.offpower_train[start:end]
aux = aux[:max_len]
offpower_train = pd.concat([offpower_train, aux])
aux = self.duration_train[start:end]
aux = aux[:max_len]
duration_train = pd.concat([duration_train, aux])
# updating stats:
self.stats[ind]['Nevents'] = max_len
self.onpower_train = onpower_train
self.offpower_train = offpower_train
self.duration_train = duration_train
# RE-TRAINS FEATURES:
self.__retrain(self.onpower, self.onpower_train)
self.__retrain(self.offpower, self.offpower_train)
self.__retrain(self.duration, self.duration_train)
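# Illustrative example (hypothetical counts): with stats Nevents of
# [120, 0, 45, 80], max_len becomes 45, so every non-empty instance keeps only
# its first 45 onpower/offpower/duration samples before the feature models are
# re-trained; this stops the 120-event instance from dominating the fit.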
def check_cdfIntegrity(self, step):
"""
Checks the integrity of the feature model distributions.
The CDF has to be bounded by one.
Parameters
----------
step: resolution step size on the x-axis for pdf and cdf functions.
"""
# Selecting bins automatically:
# NOTE: step is fixed to 1 below, so the raw sum of pdf values approximates
# the integral of the pdf (the CDF bound checked further down).
x_max = self.onpower_train.max().values[0]
x_min = 0
step = 1
x_onpower = np.arange(x_min, x_max, step).reshape(-1, 1)
x_max = 0
x_min = self.offpower_train.min().values[0]
step = 1
x_offpower = np.arange(x_min, x_max, step).reshape(-1, 1)
x_max = self.duration_train.max().values[0]
x_min = 0
step = 1
x_duration = np.arange(x_min, x_max, step).reshape(-1, 1)
# Evaluating score for:
# Onpower
y_onpower = self.__pdf2(self.onpower, x_onpower)
print "Onpower cdf: " + str(y_onpower.sum())
# Offpower
y_offpower = self.__pdf2(self.offpower, x_offpower)
print "Offpower cdf: " + str(y_offpower.sum())
# duration
y_duration = self.__pdf2(self.duration, x_duration)
print "Duration cdf: " + str(y_duration.sum())
# Plots:
# fig1 = plt.figure()
# ax1 = fig1.add_subplot(311)
# ax2 = fig1.add_subplot(312)
# ax3 = fig1.add_subplot(313)
# ax1.plot(x_onpower, y_onpower)
# ax1.set_title("PDF CDF: Onpower")
# ax1.set_ylabel("density")
# ax1.set_xlabel("Watts")
# ax2.plot(x_offpower, y_offpower)
# ax2.set_title(" PDF CDF: Offpower")
# ax2.set_ylabel("denisty")
# ax2.set_xlabel("Watts")
# ax3.plot(x_duration, y_duration)
# ax3.set_title("PDF CDF: Duration")
# ax3.set_ylabel("density")
# ax3.set_xlabel("Seconds")
def featuresHist(self, **kwargs):
"""
Visualization tool to check whether the feature model distributions fit
the training samples (onpower_train, offpower_train and duration_train).
Parameters
----------
kwargs : optional keyword arguments bins_onpower, bins_offpower and bins_duration.
Each is a numpy.arange giving the bin edges for the corresponding histogram.
"""
# Selecting bins automatically:
bins_onpower = np.arange(self.onpower_train.min().values[0],
self.onpower_train.max().values[0],
(self.onpower_train.max().values[0] -
self.onpower_train.min().values[0]) / 50)
bins_offpower = np.arange(self.offpower_train.min().values[0],
self.offpower_train.max().values[0],
(self.offpower_train.max().values[0] -
self.offpower_train.min().values[0]) / 50)
bins_duration = np.arange(self.duration_train.min().values[0],
self.duration_train.max().values[0],
(self.duration_train.max().values[0] -
self.duration_train.min().values[0]) / 50)
# If a bin has been specified update the bin sizes.
for key in kwargs:
if key == 'bins_onpower':
bins_onpower = kwargs[key]
elif key == 'bins_offpower':
bins_offpower = kwargs[key]
elif key == 'bins_duration':
bins_duration = kwargs[key]
else:
print "Non valid kwarg"
# Plot structure:
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
# Evaluating score for:
# Onpower
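# The fitted pdf is rescaled so that its peak matches the tallest histogram
# bin (norm = max bin count / max pdf value); the scaling only affects this
# overlay plot, not the stored feature model.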
x = np.arange(bins_onpower.min(), bins_onpower.max() + \
np.diff(bins_onpower)[0], np.diff(bins_onpower)[0] / float(1000)).reshape(-1, 1)
y = self.__pdf(self.onpower, x)
norm = pd.cut(
self.onpower_train.onpower, bins=bins_onpower).value_counts().max() / max(y)
# Plots for Onpower
ax1.hist(
self.onpower_train.onpower.values, bins=bins_onpower, alpha=0.5)
ax1.plot(x, y * norm)
#ax1.set_title("Feature: Onpower")
#ax1.set_ylabel("Counts")
#ax1.set_xlabel("On power (W)")
ax1.set_ylabel("On power counts")
# Offpower
x = np.arange(bins_offpower.min(), bins_offpower.max() + \
np.diff(bins_offpower)[0], np.diff(bins_offpower)[0] / float(1000)).reshape(-1, 1)
y = self.__pdf(self.offpower, x)
norm = pd.cut(self.offpower_train.offpower,
bins=bins_offpower).value_counts().max() / max(y)
# Plots for Offpower
ax2.hist(self.offpower_train.offpower.values,
bins=bins_offpower, alpha=0.5)
ax2.plot(x, y * norm)
#ax2.set_title("Feature: Offpower")
#ax2.set_ylabel("Counts")
#ax2.set_xlabel("Off power (W)")
ax2.set_ylabel("Off power counts")
# Duration
x = np.arange(bins_duration.min(), bins_duration.max() + \
np.diff(bins_duration)[0], np.diff(bins_duration)[0] / float(1000)).reshape(-1, 1)
y = self.__pdf(self.duration, x)
norm = pd.cut(self.duration_train.duration,
bins=bins_duration).value_counts().max() / max(y)
# Plots for duration
ax3.hist(self.duration_train.duration.values,
bins=bins_duration, alpha=0.5)
ax3.plot(x, y * norm)
#ax3.set_title("Feature: Duration")
#ax3.set_ylabel("Counts")
#ax3.set_xlabel("Duration (seconds)")
ax3.set_ylabel("Duration counts")
def featuresHist_colors(self, **kwargs):
"""
Visualization tool to check whether the training samples
(onpower_train, offpower_train and duration_train) are similarly
distributed across appliance instances (of the same appliance model).
Each appliance instance is drawn in a different color.
Parameters
----------
kwargs : optional keyword arguments bins_onpower, bins_offpower and bins_duration.
Each is a numpy.arange giving the bin edges for the corresponding histogram.
"""
# Selecting bins automatically:
bins_onpower = np.arange(self.onpower_train.min().values[0],
self.onpower_train.max().values[0],
(self.onpower_train.max().values[0] -
self.onpower_train.min().values[0]) / 50)
bins_offpower = np.arange(self.offpower_train.min().values[0],
self.offpower_train.max().values[0],
(self.offpower_train.max().values[0] -
self.offpower_train.min().values[0]) / 50)
bins_duration = np.arange(self.duration_train.min().values[0],
self.duration_train.max().values[0],
(self.duration_train.max().values[0] -
self.duration_train.min().values[0]) / 50)
# If bins were specified via kwargs, use them instead.
for key in kwargs:
if key == 'bins_onpower':
bins_onpower = kwargs[key]
elif key == 'bins_offpower':
bins_offpower = kwargs[key]
elif key == 'bins_duration':
bins_duration = kwargs[key]
else:
print "Non valid kwarg"
# Plot:
fig1 = plt.figure()
ax1 = fig1.add_subplot(311)
ax2 = fig1.add_subplot(312)
ax3 = fig1.add_subplot(313)
start = 0
end = 0
for ind in np.arange(len(self.stats)):
if self.stats[ind]['Nevents'] != 0:
if ind == 0:
start = 0
else:
start = end
end += self.stats[ind]['Nevents']
ax1.hist(
self.onpower_train[start:end].onpower.values, bins=bins_onpower, alpha=0.5)
ax2.hist(
self.offpower_train[start:end].offpower.values, bins=bins_offpower, alpha=0.5)
ax3.hist(
self.duration_train[start:end].duration.values, bins=bins_duration, alpha=0.5)
ax1.set_title("Feature: Onpower")
ax1.set_xlabel("Watts")
ax1.set_ylabel("Counts")
ax2.set_title("Feature: Offpower")
ax2.set_xlabel("Watts")
ax2.set_ylabel("Counts")
ax3.set_title("Feature: Duration")
ax3.set_xlabel("Seconds")
ax3.set_ylabel("Counts")
|
|
from datetime import datetime
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib.admin import helpers
from django.contrib.admin.utils import (
NestedObjects, display_for_field, display_for_value, flatten,
flatten_fieldsets, label_for_field, lookup_field, quote,
)
from django.db import DEFAULT_DB_ALIAS, models
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from .models import (
Article, Car, Count, Event, EventGuide, Location, Site, Vehicle,
)
class NestedObjectsTests(TestCase):
"""
Tests for ``NestedObjects`` utility collection.
"""
def setUp(self):
self.n = NestedObjects(using=DEFAULT_DB_ALIAS)
self.objs = [Count.objects.create(num=i) for i in range(5)]
def _check(self, target):
self.assertEqual(self.n.nested(lambda obj: obj.num), target)
def _connect(self, i, j):
self.objs[i].parent = self.objs[j]
self.objs[i].save()
def _collect(self, *indices):
self.n.collect([self.objs[i] for i in indices])
def test_unrelated_roots(self):
self._connect(2, 1)
self._collect(0)
self._collect(1)
self._check([0, 1, [2]])
def test_siblings(self):
self._connect(1, 0)
self._connect(2, 0)
self._collect(0)
self._check([0, [1, 2]])
def test_non_added_parent(self):
self._connect(0, 1)
self._collect(0)
self._check([0])
def test_cyclic(self):
self._connect(0, 2)
self._connect(1, 0)
self._connect(2, 1)
self._collect(0)
self._check([0, [1, [2]]])
def test_queries(self):
self._connect(1, 0)
self._connect(2, 0)
# 1 query to fetch all children of 0 (1 and 2)
# 1 query to fetch all children of 1 and 2 (none)
# Should not require additional queries to populate the nested graph.
self.assertNumQueries(2, self._collect, 0)
def test_on_delete_do_nothing(self):
"""
The nested collector doesn't query for DO_NOTHING objects.
"""
n = NestedObjects(using=DEFAULT_DB_ALIAS)
objs = [Event.objects.create()]
EventGuide.objects.create(event=objs[0])
with self.assertNumQueries(2):
# One for Location, one for Guest, and no query for EventGuide
n.collect(objs)
def test_relation_on_abstract(self):
"""
NestedObjects.collect() doesn't trip (AttributeError) on the special
notation for relations on abstract models (related_name that contains
%(app_label)s and/or %(class)s) (#21846).
"""
n = NestedObjects(using=DEFAULT_DB_ALIAS)
Car.objects.create()
n.collect([Vehicle.objects.first()])
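# For reference, the special related_name notation exercised above looks
# roughly like this on an abstract base model (illustrative sketch, not the
# actual definitions in .models):
#
#   class VehicleOwnerBase(models.Model):
#       vehicle = models.ForeignKey(
#           Vehicle, on_delete=models.CASCADE,
#           related_name='%(app_label)s_%(class)s_owners')
#
#       class Meta:
#           abstract = True
#
# Concrete subclasses then each get their own reverse accessor, which is what
# NestedObjects.collect() has to resolve without raising AttributeError.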
class UtilsTests(SimpleTestCase):
empty_value = '-empty-'
def test_values_from_lookup_field(self):
"""
Regression test for #12654: lookup_field
"""
SITE_NAME = 'example.com'
TITLE_TEXT = 'Some title'
CREATED_DATE = datetime.min
ADMIN_METHOD = 'admin method'
SIMPLE_FUNCTION = 'function'
INSTANCE_ATTRIBUTE = 'attr'
class MockModelAdmin:
def get_admin_value(self, obj):
return ADMIN_METHOD
def simple_function(obj):
return SIMPLE_FUNCTION
site_obj = Site(domain=SITE_NAME)
article = Article(
site=site_obj,
title=TITLE_TEXT,
created=CREATED_DATE,
)
article.non_field = INSTANCE_ATTRIBUTE
verifications = (
('site', SITE_NAME),
('created', localize(CREATED_DATE)),
('title', TITLE_TEXT),
('get_admin_value', ADMIN_METHOD),
(simple_function, SIMPLE_FUNCTION),
('test_from_model', article.test_from_model()),
('non_field', INSTANCE_ATTRIBUTE)
)
mock_admin = MockModelAdmin()
for name, value in verifications:
field, attr, resolved_value = lookup_field(name, article, mock_admin)
if field is not None:
resolved_value = display_for_field(resolved_value, field, self.empty_value)
self.assertEqual(value, resolved_value)
def test_null_display_for_field(self):
"""
Regression test for #12550: display_for_field should handle None
value.
"""
display_value = display_for_field(None, models.CharField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
display_value = display_for_field(None, models.CharField(
choices=(
(None, "test_none"),
)
), self.empty_value)
self.assertEqual(display_value, "test_none")
display_value = display_for_field(None, models.DateField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
display_value = display_for_field(None, models.TimeField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
# Regression test for #13071: NullBooleanField has special
# handling.
display_value = display_for_field(None, models.NullBooleanField(), self.empty_value)
expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None" />' % settings.STATIC_URL
self.assertHTMLEqual(display_value, expected)
display_value = display_for_field(None, models.DecimalField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
display_value = display_for_field(None, models.FloatField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
def test_number_formats_display_for_field(self):
display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
self.assertEqual(display_value, '12345.6789')
display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
self.assertEqual(display_value, '12345.6789')
display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
self.assertEqual(display_value, '12345')
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_number_formats_with_thousand_separator_display_for_field(self):
display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
self.assertEqual(display_value, '12,345.6789')
display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
self.assertEqual(display_value, '12,345.6789')
display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
self.assertEqual(display_value, '12,345')
def test_list_display_for_value(self):
display_value = display_for_value([1, 2, 3], self.empty_value)
self.assertEqual(display_value, '1, 2, 3')
display_value = display_for_value([1, 2, 'buckle', 'my', 'shoe'], self.empty_value)
self.assertEqual(display_value, '1, 2, buckle, my, shoe')
def test_label_for_field(self):
"""
Tests for label_for_field
"""
self.assertEqual(
label_for_field("title", Article),
"title"
)
self.assertEqual(
label_for_field("hist", Article),
"History"
)
self.assertEqual(
label_for_field("hist", Article, return_attr=True),
("History", None)
)
self.assertEqual(
label_for_field("__str__", Article),
"article"
)
with self.assertRaisesMessage(AttributeError, "Unable to lookup 'unknown' on Article"):
label_for_field("unknown", Article)
def test_callable(obj):
return "nothing"
self.assertEqual(
label_for_field(test_callable, Article),
"Test callable"
)
self.assertEqual(
label_for_field(test_callable, Article, return_attr=True),
("Test callable", test_callable)
)
self.assertEqual(
label_for_field("test_from_model", Article),
"Test from model"
)
self.assertEqual(
label_for_field("test_from_model", Article, return_attr=True),
("Test from model", Article.test_from_model)
)
self.assertEqual(
label_for_field("test_from_model_with_override", Article),
"not What you Expect"
)
self.assertEqual(
label_for_field(lambda x: "nothing", Article),
"--"
)
self.assertEqual(label_for_field('site_id', Article), 'Site id')
class MockModelAdmin:
def test_from_model(self, obj):
return "nothing"
test_from_model.short_description = "not Really the Model"
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
"not Really the Model"
)
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin, return_attr=True),
("not Really the Model", MockModelAdmin.test_from_model)
)
def test_label_for_property(self):
# NOTE: cannot use @property decorator, because of
# AttributeError: 'property' object has no attribute 'short_description'
class MockModelAdmin:
def my_property(self):
return "this if from property"
my_property.short_description = 'property short description'
test_from_property = property(my_property)
self.assertEqual(
label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
'property short description'
)
def test_related_name(self):
"""
Regression test for #13963
"""
self.assertEqual(
label_for_field('location', Event, return_attr=True),
('location', None),
)
self.assertEqual(
label_for_field('event', Location, return_attr=True),
('awesome event', None),
)
self.assertEqual(
label_for_field('guest', Event, return_attr=True),
('awesome guest', None),
)
def test_safestring_in_field_label(self):
# safestring should not be escaped
class MyForm(forms.Form):
text = forms.CharField(label=mark_safe('<i>text</i>'))
cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline"><i>text</i>:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>')
# normal strings need to be escaped
class MyForm(forms.Form):
text = forms.CharField(label='&text')
cb = forms.BooleanField(label='&cb')
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline">&text:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">&cb</label>')
def test_flatten(self):
flat_all = ['url', 'title', 'content', 'sites']
inputs = (
((), []),
(('url', 'title', ('content', 'sites')), flat_all),
(('url', 'title', 'content', 'sites'), flat_all),
((('url', 'title'), ('content', 'sites')), flat_all)
)
for orig, expected in inputs:
self.assertEqual(flatten(orig), expected)
def test_flatten_fieldsets(self):
"""
Regression test for #18051
"""
fieldsets = (
(None, {
'fields': ('url', 'title', ('content', 'sites'))
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
fieldsets = (
(None, {
'fields': ('url', 'title', ['content', 'sites'])
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
def test_quote(self):
self.assertEqual(quote('something\nor\nother'), 'something_0Aor_0Aother')
|
|
# Copyright (c) 2010, 2011 Arek Korbik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zope.interface import implements
from twisted.internet import defer
# try:
# import tasks
# HAVE_TASKS = 1
# except ImportError:
# tasks = None
# HAVE_TASKS = 0
from twimp.server.interfaces import IStream, ILiveStream, IStreamGroup
from twimp.server.interfaces import IStreamServer
from twimp.server.errors import InvalidFrameNumber, StreamNotFoundError
from twimp.server.errors import NamespaceNotFoundError, StreamExistsError
class IMServerStream(object):
def __init__(self):
self.meta = {}
self.params = {}
self.headers = []
self.data = []
self.data_offset = 0
self.data_listeners = set()
self.state = None # ???
class IMServerStreamGroup(object):
def __init__(self, name=None, namespace=None):
self.meta = {}
self.streams = []
self.name = name
self.namespace = namespace
class IMStream(object):
implements(IStream)
def __init__(self, server_stream):
self._s = server_stream
self._pos = 0
self._grpos = 0
# 'protected' helpers for easier subclassing
def notify_write_listeners(self, grpos, flags, data):
for c in self._s.data_listeners:
try:
c(grpos, flags, data)
except:
defer.fail() # this should end up in logs somewhere... :/
##
# IStream interface implementation
def params(self):
return defer.succeed(self._s.params.copy())
def set_params(self, params):
self._s.params = params.copy()
return defer.succeed(None)
def meta(self):
return defer.succeed(self._s.meta.copy())
def set_meta(self, meta):
self._s.meta = meta.copy()
return defer.succeed(None)
def read_headers(self, callback):
# [the immediate direct approach]:
# for f in self._s.headers:
# callback(*f)
#
# return defer.succeed(None) # number of frames?
# [the cancellable task approach]:
# class SequenceIterTask(tasks.CompTask):
# def __init__(self, seq, callback):
# tasks.CompTask.__init__(self)
# self._iter = iter(seq)
# self._elt_callback = callback
#
# def _call_read_callback(self, elt):
# self._elt_callback(*elt)
#
# def do(self, input):
# try:
# elt = self._iter.next()
# except:
# return
#
# return tasks.GenericTask(self._call_read_callback)(elt)
#
# t = SequenceIterTask(self._s.headers, callback)
# return t, t.run()
# [the immediate direct + non-cancellable task compromise approach]:
for f in self._s.headers:
callback(*f)
# if HAVE_TASKS:
# t = tasks.CompTask()
# return t, t.run()
return None, defer.succeed(None)
def write_headers(self, data, grpos=0, flags=0):
self._s.headers.append((grpos, flags, data))
return defer.succeed(None)
def seek(self, offset, whence=0, frames=None):
raise NotImplementedError('FIXME!!!')
def pseek(self, offset, whence=0, frames=None, flag_mask=0):
raise NotImplementedError('FIXME!!!')
def read(self, callback, grpos_range, frames=None):
if grpos_range:
# here we only handle the case of data shrinking from the
# left / earlier side...
end_grpos = self._grpos + grpos_range
pos = self._pos
grpos = self._grpos
while 1:
idx = pos - self._s.data_offset
if idx < 0:
pos -= idx
idx = 0
try:
f = self._s.data[idx]
except IndexError:
# we're behind the last frame -> no read
break
grpos = f[0]
if grpos >= end_grpos:
grpos = end_grpos
break
callback(*f)
pos += 1
self._pos = pos
self._grpos = grpos
elif frames:
pos = self._pos
grpos = self._grpos
while 1:
idx = pos - self._s.data_offset
if idx < 0:
pos -= idx
frames += idx
if frames < 1:
break
try:
f = self._s.data[idx]
except IndexError:
# we're behind the last frame -> no more read
break
grpos = f[0]
callback(*f)
pos += 1
frames -= 1
self._pos = pos
self._grpos = grpos
# if HAVE_TASKS:
# t = tasks.CompTask()
# return t, t.run()
return None, defer.succeed(None)
def write(self, grpos, flags, data):
self._s.data.append((grpos, flags, data))
self.notify_write_listeners(grpos, flags, data)
return defer.succeed(None)
def _scan_from_end(self, grpos_range, frames=None, flag_mask=0):
if not self._s.data:
return None
pos = len(self._s.data) - 1
if grpos_range > 0:
grpos = self._s.data[pos][0]
target_grpos = grpos - grpos_range
while pos > 0:
pos -= 1
f = self._s.data[pos]
if f[0] < target_grpos:
pos += 1
break
elif frames > 0:
pos = max(0, len(self._s.data) - frames)
if flag_mask < 0:
mask = - flag_mask
fpos = pos
while fpos >= 0:
f = self._s.data[fpos]
if f[1] & mask:
break
fpos -= 1
if fpos >= 0:
pos = fpos
elif flag_mask > 0:
mask = flag_mask
fpos = pos
end_pos = len(self._s.data) - 1
while fpos <= end_pos:
f = self._s.data[fpos]
if f[1] & mask:
break
fpos += 1
if fpos <= end_pos:
pos = fpos
return self._s.data_offset + pos
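# Illustrative walk-through (made-up frames): with data
#   [(0, 0, ...), (40, 1, ...), (80, 0, ...), (120, 1, ...), (160, 0, ...)]
# and grpos_range=100, the backward scan stops at the frame with grpos 80,
# the oldest frame still within 100 grpos units of the newest one; adding
# flag_mask=-1 then walks further back to the nearest frame whose flags match
# the mask (grpos 40), so consumers can start from a keyframe-like boundary.
# The returned value is that position plus data_offset.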
def trim(self, grpos_range, frames=None, flag_mask=0):
raw_pos = self._scan_from_end(grpos_range, frames=frames,
flag_mask=flag_mask)
if raw_pos is not None:
pos = raw_pos - self._s.data_offset
if pos > 0:
self._s.data_offset += pos
self._s.data[:pos] = []
return defer.succeed(None)
def subscribe(self, callback, preroll_grpos_range=0, preroll_frames=0,
preroll_from_frame=None, flag_mask=0):
pos = None
if preroll_grpos_range > 0 or preroll_frames > 0:
raw_pos = self._scan_from_end(preroll_grpos_range,
frames=preroll_frames,
flag_mask=flag_mask)
if raw_pos is not None:
pos = raw_pos - self._s.data_offset
elif preroll_from_frame is not None:
pos = preroll_from_frame - self._s.data_offset
if not (0 <= pos < len(self._s.data)):
e = InvalidFrameNumber('frame %r' % (preroll_from_frame))
return defer.fail(e)
if pos is not None:
for f in self._s.data[pos:]:
callback(*f)
self._s.data_listeners.add(callback)
return defer.succeed(callback)
def unsubscribe(self, subscription):
self._s.data_listeners.remove(subscription)
return defer.succeed(None)
def find_frame_backward(self, grpos_range, frames=None, flag_mask=0):
pos = self._scan_from_end(grpos_range, frames=frames,
flag_mask=flag_mask)
return defer.succeed(pos)
def frame_to_grpos(self, frame):
data_len = len(self._s.data)
if frame < 0:
frame = self._s.data_offset + data_len + frame
raw_frame = frame - self._s.data_offset
if 0 <= raw_frame < data_len:
return defer.succeed(self._s.data[raw_frame][0])
return defer.fail(InvalidFrameNumber('frame %r' % (frame,)))
class IMLiveStream(IMStream):
implements(ILiveStream)
def __init__(self, server_stream):
IMStream.__init__(self, server_stream)
self._buffer_grpos = 0
self._buffer_frames = 0
self._buffer_flagmask = 0
self._grpos_last = None
self._grpos_first = None
self._index = None
if self._s.data:
self._grpos_first = self._s.data[0][0]
self._grpos_last = self._s.data[-1][0]
self._set_buffering(grpos_range=0, frames=0, flag_mask=0)
def write(self, grpos, flags, data):
self._write_selected(grpos, flags, data)
self.notify_write_listeners(grpos, flags, data)
return defer.succeed(None)
def set_buffering(self, grpos_range=0, frames=0, flag_mask=0):
self._set_buffering(grpos_range=grpos_range, frames=frames,
flag_mask=flag_mask)
return defer.succeed(None)
def _set_buffering(self, grpos_range=0, frames=0, flag_mask=0):
if grpos_range > 0:
self._buffer_grpos = grpos_range
self._buffer_frames = 0
if flag_mask != 0:
self._write_selected = self._write_buffering_with_index
self._cut_selected = self._cut_grpos_flagmask
self._buffer_flagmask = abs(flag_mask)
self._init_index()
else:
self._write_selected = self._write_buffering_no_index
self._cut_selected = self._cut_grpos
self._buffer_flagmask = 0
self._index = None
elif frames > 0:
self._buffer_frames = frames
self._buffer_grpos = 0
if flag_mask != 0:
self._write_selected = self._write_buffering_with_index
self._cut_selected = self._cut_frames_flagmask
self._buffer_flagmask = abs(flag_mask)
self._init_index()
else:
self._write_selected = self._write_buffering_no_index
self._cut_selected = self._cut_frames
self._buffer_flagmask = 0
self._index = None
else:
self._buffer_grpos = 0
self._buffer_frames = 0
self._buffer_flagmask = 0
self._write_selected = self._write_no_buffering
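# Example of the dispatch (hypothetical values): set_buffering(grpos_range=3000,
# flag_mask=1) selects _write_buffering_with_index and _cut_grpos_flagmask, so
# every write appends, frames matching flag 0x01 are indexed, and the buffer is
# trimmed back to the most recent flagged frame at least 3000 grpos units older
# than the newest write. Calling set_buffering() with all defaults selects
# _write_no_buffering, which keeps only the latest frame.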
def _init_index(self):
index = []
if self._s.data:
offset = self._s.data_offset
for i, (grpos, flags, data) in enumerate(self._s.data):
if self._buffer_flagmask & flags:
index.append((i + offset, grpos))
self._index = index
def _cut_grpos(self):
d = self._s.data
target_grpos = self._grpos_last - self._buffer_grpos
pos, grpos, l = 0, self._grpos_first, len(d)
while grpos < target_grpos and pos < l:
pos += 1
grpos = d[pos][0]
if pos > 0:
self._s.data_offset, d[:pos] = self._s.data_offset + pos, []
self._grpos_first = d[0][0]
def _cut_frames(self):
d = self._s.data
l = len(d)
pos = l - self._buffer_frames
if pos > 0:
self._s.data_offset, d[:pos] = self._s.data_offset + pos, []
self._grpos_first = d[0][0]
def _cut_grpos_flagmask(self):
d = self._s.data
target_grpos = self._grpos_last - self._buffer_grpos
i_pos, i_len = 0, len(self._index)
while i_pos < i_len:
abs_pos, grpos = self._index[i_pos]
if grpos > target_grpos:
break
i_pos += 1
if i_pos > 0:
i_pos -= 1
offset = self._s.data_offset
pos, self._index[:i_pos] = self._index[i_pos][0] - offset, []
self._s.data_offset, d[:pos] = offset + pos, []
self._grpos_first = d[0][0]
def _cut_frames_flagmask(self):
offset = self._s.data_offset
target_pos = len(self._s.data) - self._buffer_frames
if target_pos < 1:
return
i_pos, i_len = 0, len(self._index)
target_pos += offset
while i_pos < i_len:
abs_pos, grpos = self._index[i_pos]
if abs_pos > target_pos:
break
i_pos += 1
if i_pos > 0:
i_pos -= 1
pos, self._index[:i_pos] = self._index[i_pos][0] - offset, []
d = self._s.data
self._s.data_offset, d[:pos] = offset + pos, []
self._grpos_first = d[0][0]
def _write_no_buffering(self, grpos, flags, data):
if self._s.data:
self._s.data[0] = (grpos, flags, data)
else:
self._s.data.append((grpos, flags, data))
self._s.data_offset += 1
self._grpos_first = self._grpos_last = grpos
def _write_buffering_no_index(self, grpos, flags, data):
self._s.data.append((grpos, flags, data))
if self._grpos_first is None:
self._grpos_first = self._s.data[0][0]
self._grpos_last = grpos
self._cut_selected()
def _update_index(self, frame, grpos, flags):
if flags & self._buffer_flagmask:
self._index.append((frame, grpos))
def _write_buffering_with_index(self, grpos, flags, data):
self._s.data.append((grpos, flags, data))
if self._grpos_first is None:
self._grpos_first = self._s.data[0][0]
self._grpos_last = grpos
self._update_index(len(self._s.data) + self._s.data_offset - 1,
grpos, flags)
self._cut_selected()
class IMStreamGroup(object):
implements(IStreamGroup)
def __init__(self, server_streamgroup):
self._g = server_streamgroup
self._streams = self.build_streams(self._g.streams)
# 'protected' helpers for easier subclassing
def build_stream(self, server_stream):
return IMStream(server_stream)
def build_streams(self, server_streams):
return [self.build_stream(s) for s in server_streams]
##
# IStreamGroup interface implementation
def meta(self):
return defer.succeed(self._g.meta.copy())
def set_meta(self, meta):
self._g.meta = meta.copy()
return defer.succeed(None)
def streams(self):
return defer.succeed(self._streams[:])
def streams_by_params(self, template):
def got_params(results):
t = frozenset(template.iteritems())
return [s for (s, params) in zip(self._streams,
[r[1] for r in results])
if t <= frozenset(params.iteritems())]
dl = defer.DeferredList([s.params() for s in self._streams],
fireOnOneErrback=1, consumeErrors=1)
dl.addCallback(got_params)
return dl
def add_stream(self, stream):
raise NotImplementedError('FIXME!!!')
def make_stream(self):
ss = IMServerStream()
self._g.streams.append(ss)
s = self.build_stream(ss)
self._streams.append(s)
return defer.succeed(s)
def seek(self, offset, whence=0):
raise NotImplementedError('FIXME!!!')
def read_to(self, callback, grpos, cb_args_map=None):
raise NotImplementedError('FIXME!!!')
def subscribe(self, callback, preroll_grpos_range=0,
preroll_from_frames=None, cb_args_map=None):
pos_frames = None
if not preroll_grpos_range and preroll_from_frames is not None:
pos_frames = [(s, preroll_from_frames[s]) for s in self._streams]
def extra_args_cb_maker(callback, s):
if s in cb_args_map:
args = cb_args_map[s]
if not isinstance(args, (tuple, list)):
args = (args,)
else:
args = ()
def cb_wrapper(time, flags, data):
callback(time, flags, data, *args)
return cb_wrapper
def subscriptions_made(results, streams):
return [(s, r[1]) for (s, r) in zip(streams, results)]
if cb_args_map is None:
cb_args_map = {}
ds = []
if pos_frames is None:
for s in self._streams:
cb = extra_args_cb_maker(callback, s)
d = s.subscribe(cb, preroll_grpos_range=preroll_grpos_range)
ds.append(d)
else:
for s, from_frame in pos_frames:
cb = extra_args_cb_maker(callback, s)
d = s.subscribe(cb, preroll_from_frame=from_frame)
ds.append(d)
dl = defer.DeferredList(ds, fireOnOneErrback=1, consumeErrors=1)
dl.addCallback(subscriptions_made, self._streams)
return dl
def unsubscribe(self, subscription):
dl = defer.DeferredList([stream.unsubscribe(sub)
for (stream, sub) in subscription],
fireOnOneErrback=1, consumeErrors=1)
dl.addCallback(lambda results: None)
return dl
class IMLiveStreamGroup(IMStreamGroup):
implements(IStreamGroup)
def build_stream(self, server_stream):
return IMLiveStream(server_stream)
class IMServer(object):
implements(IStreamServer)
def __init__(self, namespaces=None):
# _store: { namespace => { name => stream_group } }
if namespaces:
self._store = dict((ns, {}) for ns in namespaces)
else:
self._store = {None: {}}
def open(self, name, mode='r', namespace=None):
if mode not in ('r', 'l'):
raise NotImplementedError('TBD later!')
if mode == 'l':
return defer.maybeDeferred(self._open_live, namespace, name)
elif mode == 'r':
return defer.maybeDeferred(self._open_readable, namespace, name)
def _open_readable(self, namespace, name):
ns = self._store.get(namespace, None)
if ns is None:
raise NamespaceNotFoundError('Unknown namespace %r' % namespace)
server_sg = ns.get(name, None)
if server_sg is None:
raise StreamNotFoundError('Unknown stream %r' % name)
sg = IMStreamGroup(server_sg)
return defer.succeed(sg)
def _open_live(self, namespace, name):
ns = self._store.get(namespace, None)
if ns is None:
raise NamespaceNotFoundError('Unknown namespace %r' % namespace)
server_sg = ns.get(name, None)
if server_sg is not None:
raise StreamExistsError('Stream already exists: %r' % name)
server_sg = IMServerStreamGroup(name, namespace)
ns[name] = server_sg
sg = IMLiveStreamGroup(server_sg)
return defer.succeed(sg)
def close(self, streamgroup):
if isinstance(streamgroup, IMLiveStreamGroup):
server_sg = streamgroup._g # cheating a bit...
ns = self._store[server_sg.namespace]
del ns[server_sg.name]
return defer.succeed(None)
def delete(self, streamgroup):
raise NotImplementedError('TBD later!')
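# Rough usage sketch (illustrative; Twisted reactor plumbing and error handling
# omitted, timestamps/flags are made up):
#
#     from twisted.internet import defer
#
#     @defer.inlineCallbacks
#     def publish_and_watch():
#         server = IMServer()
#         group = yield server.open('camera1', mode='l')    # live stream group
#         stream = yield group.make_stream()
#         yield stream.set_buffering(grpos_range=3000, flag_mask=1)
#
#         yield stream.write(0, 1, 'keyframe bytes')
#         yield stream.write(40, 0, 'delta bytes')
#
#         def on_frame(grpos, flags, data):
#             print 'frame:', grpos, flags, len(data)
#
#         subscription = yield stream.subscribe(on_frame,
#                                               preroll_grpos_range=3000,
#                                               flag_mask=1)
#         yield stream.unsubscribe(subscription)
#         yield server.close(group)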
|
|
import unittest
import importlib
#source: http://stackoverflow.com/a/11158224/5343977
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import ant_colony as module
class TestAntColonyInit(unittest.TestCase):
def test_correct(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): pass
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#override this, as _init_nodes is called in __init__ and must return two values; the stub above returns None, which would raise "TypeError: 'NoneType' object is not iterable"
def test_init_nodes(self, nodes):
return 1, 2
test_empty_object._init_nodes = test_init_nodes
def test_init_ants(self, start=0):
return True
test_empty_object._init_ants = test_init_ants
test_object = test_empty_object(testing_nodes, test_distance_callback)
#testing
self.assertEqual(test_object.start, 0)
self.assertEqual(test_object.id_to_key, 1)
self.assertEqual(test_object.nodes, 2)
self.assertTrue(test_object.ants)
def test_start_set_correct(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): pass
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#borrowing actual init nodes for this test
def test_init_nodes(self, nodes):
"""
create a mapping of internal id numbers (0 .. n) to the keys in the nodes passed
create a mapping of the id's to the values of nodes
we use id_to_key to return the route in the node names the caller expects in mainloop()
"""
id_to_key = dict()
id_to_values = dict()
id = 0
for key in sorted(nodes.keys()):
id_to_key[id] = key
id_to_values[id] = nodes[key]
id += 1
return id_to_key, id_to_values
test_empty_object._init_nodes = test_init_nodes
def test_init_ants(self, start=0):
return True
test_empty_object._init_ants = test_init_ants
test_object = test_empty_object(testing_nodes, test_distance_callback, start='a')
#testing
# import debug
# debug.DEBUG(test_object.start)
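# Under Python 2's mixed-type ordering (ints sort before strings), the
# borrowed _init_nodes maps {0: 15, 1: 'a', 2: 'beaver', 3: 'yes we can'},
# which is why start='a' is expected to resolve to internal id 1.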
self.assertEqual(test_object.start, 1)
def test_start_set_value_not_found(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): pass
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#borrowing actual init nodes for this test
def test_init_nodes(self, nodes):
"""
create a mapping of internal id numbers (0 .. n) to the keys in the nodes passed
create a mapping of the id's to the values of nodes
we use id_to_key to return the route in the node names the caller expects in mainloop()
"""
id_to_key = dict()
id_to_values = dict()
id = 0
for key in sorted(nodes.keys()):
id_to_key[id] = key
id_to_values[id] = nodes[key]
id += 1
return id_to_key, id_to_values
test_empty_object._init_nodes = test_init_nodes
def test_init_ants(self, start=0):
return True
test_empty_object._init_ants = test_init_ants
with self.assertRaisesRegexp(KeyError, 'not found in the nodes dict passed'):
test_empty_object(testing_nodes, test_distance_callback, start='b')
#invalid parameter testing
def test_ant_colony_ant_count_too_small(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): pass
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#borrowing actual init nodes for this test
def test_init_nodes(self, nodes):
"""
create a mapping of internal id numbers (0 .. n) to the keys in the nodes passed
create a mapping of the id's to the values of nodes
we use id_to_key to return the route in the node names the caller expects in mainloop()
"""
id_to_key = dict()
id_to_values = dict()
id = 0
for key in sorted(nodes.keys()):
id_to_key[id] = key
id_to_values[id] = nodes[key]
id += 1
return id_to_key, id_to_values
test_empty_object._init_nodes = test_init_nodes
def test_init_ants(self, start=0):
return True
test_empty_object._init_ants = test_init_ants
with self.assertRaisesRegexp(ValueError, 'ant_count must be >= 1'):
test_object = test_empty_object(testing_nodes, test_distance_callback, ant_count=0)
def test_ant_colony_ant_count_invalid_type(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): pass
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#borrowing actual init nodes for this test
def test_init_nodes(self, nodes):
"""
create a mapping of internal id numbers (0 .. n) to the keys in the nodes passed
create a mapping of the id's to the values of nodes
we use id_to_key to return the route in the node names the caller expects in mainloop()
"""
id_to_key = dict()
id_to_values = dict()
id = 0
for key in sorted(nodes.keys()):
id_to_key[id] = key
id_to_values[id] = nodes[key]
id += 1
return id_to_key, id_to_values
test_empty_object._init_nodes = test_init_nodes
def test_init_ants(self, start=0):
return True
test_empty_object._init_ants = test_init_ants
with self.assertRaisesRegexp(TypeError, 'ant_count must be int'):
test_object = test_empty_object(testing_nodes, test_distance_callback, ant_count=None)
def test_ant_colony_alpha_invalid_type(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(TypeError, 'alpha must be int or float'):
test_object = test_empty_object(testing_nodes, test_distance_callback, alpha='a')
def test_ant_colony_alpha_too_small(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(ValueError, 'alpha must be >= 0'):
test_object = test_empty_object(testing_nodes, test_distance_callback, alpha=-1)
def test_ant_colony_beta_invalid_type(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(TypeError, 'beta must be int or float'):
test_object = test_empty_object(testing_nodes, test_distance_callback, beta='a')
def test_ant_colony_beta_too_small(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(ValueError, 'beta must be >= 1'):
test_object = test_empty_object(testing_nodes, test_distance_callback, beta=0)
def test_pheromone_evaporation_coefficient_invalid_type(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(TypeError, 'pheromone_evaporation_coefficient must be int or float'):
test_object = test_empty_object(testing_nodes, test_distance_callback, pheromone_evaporation_coefficient='a')
def test_pheromone_constant_invalid_type(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(TypeError, 'pheromone_constant must be int or float'):
test_object = test_empty_object(testing_nodes, test_distance_callback, pheromone_constant='a')
def test_pheromone_iterations_invalid_type(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(TypeError, 'iterations must be int'):
test_object = test_empty_object(testing_nodes, test_distance_callback, iterations='a')
def test_pheromone_iterations_too_small(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(ValueError, 'iterations must be >= 0'):
test_object = test_empty_object(testing_nodes, test_distance_callback, iterations=-1)
def test_pheromone_nodes_too_small(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = dict()
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(ValueError, 'there must be at least one node in dict nodes'):
test_object = test_empty_object(testing_nodes, test_distance_callback)
def test_pheromone_nodes_invalid_type(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = []
def test_distance_callback(self):
pass
#testing
with self.assertRaisesRegexp(TypeError, 'nodes must be dict'):
test_object = test_empty_object(testing_nodes, test_distance_callback)
def test_distance_callback_not_callable(self):
module.debug = False
#setup
class test_empty_object(module.ant_colony):
#def __init__(self): pass
def _get_distance(self, start, end): pass
def _init_matrix(self, size, value=None): pass
def _init_ants(self, start=0): pass
def _update_pheromone_map(self): pass
def _populate_ant_updated_pheromone_map(self, ant): pass
def mainloop(self): pass
def _init_nodes(self, nodes): return 1, 2
testing_nodes = {
'a' : (1, 1),
15 : (0, 0),
'beaver' : (2, 2),
'yes we can' : (3, 3),
}
test_distance_callback = 0
#testing
with self.assertRaisesRegexp(TypeError, 'distance_callback is not callable, should be method'):
test_object = test_empty_object(testing_nodes, test_distance_callback)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MonsterAttackModifier.option2'
db.add_column('main_monsterattackmodifier', 'option2',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MonsterAttackModifier.option2'
db.delete_column('main_monsterattackmodifier', 'option2')
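# This migration would typically be applied with South via something like
# "python manage.py migrate main", where 'main' is the app label used in the
# table names above.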
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_set'", 'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_set'", 'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'ordering': "('name',)", 'db_table': "'django_content_type'", 'object_name': 'ContentType'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.aptitude': {
'Meta': {'object_name': 'Aptitude'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'main.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'armour': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'availability': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'damageDieCount': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'damageDieSize': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'damageScale': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'evasion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itemSlot': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'itemType': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.ItemCategory']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'magicalArmour': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'magicalEvasion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'main.baseitemability': {
'Meta': {'object_name': 'BaseItemAbility'},
'baseItem': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abilities'", 'to': "orm['main.BaseItem']", 'symmetrical': 'False'}),
'craftPoints': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'equippableTo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'usedInCrafting': ('django.db.models.fields.BooleanField', [], {})
},
'main.basemonsterattackmodifier': {
'Meta': {'object_name': 'BaseMonsterAttackModifier'},
'attackSubType': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'attackType': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cos': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'gilModifier': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isMultiplier': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'xpModifier': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'})
},
'main.baseskill': {
'Meta': {'object_name': 'BaseSkill'},
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Aptitude']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'halfRate': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isMonsterSkill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'skillType': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'specialized': ('django.db.models.fields.BooleanField', [], {})
},
'main.character': {
'Meta': {'object_name': 'Character'},
'accessorySlot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'equippedAccessories'", 'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'accessorySlot2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'equippedAccessories2'", 'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'age': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'agility': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'armourBase': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'baseHP': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'baseMP': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'blurb': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'bodySlot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'equippedBodies'", 'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'bonusAptitudes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Aptitude']", 'symmetrical': 'False'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'characterImage': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'characterImages'", 'to': "orm['main.ImageFile']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'encounterSizeMonsters': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'encounterSizePCs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'family': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gil': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'handSlot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'equippedHands'", 'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'headSlot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'equippedHeads'", 'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'hitBase': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intelligence': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'isMonster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'characters'", 'to': "orm['main.Job']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'level': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'magic': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'magicArmourBase': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'magicBase': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'monsterType': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'characters'", 'to': "orm['main.Race']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'reaction': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'secondWeaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'equippedSecondaryWeapons'", 'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'speed': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'spirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'strength': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'traits': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Trait']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'characters'", 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'vitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'weaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'equippedWeapons'", 'to': "orm['main.Item']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'xp': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'main.imagefile': {
'Meta': {'object_name': 'ImageFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '254'})
},
'main.item': {
'Meta': {'object_name': 'Item'},
'baseItem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseItem']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['main.Character']", 'null': 'True'}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.itemcategory': {
'Meta': {'object_name': 'ItemCategory'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'craftPoints': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True'}),
'defaultItemSlot': ('django.db.models.fields.IntegerField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'subCategory': ('django.db.models.fields.IntegerField', [], {'max_length': '2'})
},
'main.job': {
'Meta': {'object_name': 'Job'},
'accuracyBonus': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Aptitude']", 'null': 'True', 'blank': 'True', 'on_delete': 'models.PROTECT'}),
'expertiseAttribute': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'expertiseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']", 'null': 'True', 'blank': 'True', 'on_delete': 'models.PROTECT'}),
'hasMP': ('django.db.models.fields.BooleanField', [], {}),
'hpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ItemCategory']", 'symmetrical': 'False'}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'mpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'skillPoints': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.monsterattack': {
'Meta': {'object_name': 'MonsterAttack'},
'actionType': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'attackType': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'bonusDamage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'bonusDamagePercent': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'combatStat': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'combatStatModifier': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'damageType': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isRandom': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'monster': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'monsterAttacks'", 'to': "orm['main.Character']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'statusStat': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'statusStatModifier': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.monsterattackmodifier': {
'Meta': {'object_name': 'MonsterAttackModifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseMonsterAttackModifier']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'monsterAttack': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'modifiers'", 'to': "orm['main.MonsterAttack']", 'null': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'option2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'main.overviewbox': {
'Meta': {'object_name': 'OverviewBox'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'viewName': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.overviewboxsetting': {
'Meta': {'object_name': 'OverviewBoxSetting'},
'character': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'overviewBoxSettings'", 'to': "orm['main.Character']"}),
'enabled': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'overviewBox': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.OverviewBox']"}),
'sortOrder': ('django.db.models.fields.IntegerField', [], {}),
'spanFull': ('django.db.models.fields.BooleanField', [], {})
},
'main.race': {
'Meta': {'object_name': 'Race'},
'dayVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'hearing': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lifeSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'magicSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'nightVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'smell': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.skill': {
'Meta': {'object_name': 'Skill'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skills'", 'to': "orm['main.Character']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'specialization': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'main.trait': {
'Meta': {'object_name': 'Trait'},
'cost': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'traitType': ('django.db.models.fields.IntegerField', [], {})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'canCreateMonsters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'currentCharacter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['main']
|
|
"""
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib.colors import from_levels_and_colors
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')
pp_file1201 = '1201_mean'
pp_file2201 = '2201_mean'
pp_file3217 = '3217_mean'
pp_file3234 = '3234_mean'
pp_file='total_downward_surface_heat_flux_mean'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
min_contour = -30
max_contour = 160
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10 # for lat/lon rounding
def main():
experiment_ids = ['djzny','djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['djzny' ]
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile1201 = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file1201)
pfile2201 = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file2201)
pfile3217 = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file3217)
pfile3234 = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file3234)
#pc = iris(pfile)
pcube1201 = iris.load_cube(pfile1201)
pcube2201 = iris.load_cube(pfile2201)
pcube3217 = iris.load_cube(pfile3217)
pcube3234 = iris.load_cube(pfile3234)
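        # Combine the four mean fields into the net downward surface heat flux.
        # The stream names look like UM STASH codes (1201/2201: net downward
        # surface SW/LW radiation, 3217/3234: sensible/latent heat flux), so the
        # total is radiation in minus turbulent fluxes out; this reading of the
        # codes is an assumption from the file names, not stated in the script.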
pcubetotal = pcube1201 + pcube2201 - pcube3217 - pcube3234
print pcubetotal
#print pc
# Get min and max latitude/longitude and unrotate to get min/max corners to crop plot automatically - otherwise end with blank bits on the edges
lats = pcubetotal.coord('grid_latitude').points
lons = pcubetotal.coord('grid_longitude').points
cs = pcubetotal.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print 'Rotated CS %s' % cs
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lon_corner_u,lat_corner_u = unrotate.unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
else:
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
print lat_high_tick
print lat_low_tick
plt.figure(figsize=(8,8))
cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
clevs = np.linspace(min_contour, max_contour,256)
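        # Centre the diverging colormap on zero: interpolate the contour-level
        # midpoints onto [0, 1] so that 0 W m^-2 maps to the middle of RdBu_r
        # even though the contour range (-30 to 160) is asymmetric about zero.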
midpoint=0
midp = np.mean(np.c_[clevs[:-1], clevs[1:]], axis=1)
vals = np.interp(midp, [min_contour, midpoint, max_contour], [0, 0.5, 1])
cols = plt.cm.RdBu_r(vals)
clevs_extend = np.linspace(min_contour, max_contour,254)
cmap, norm = from_levels_and_colors(clevs_extend, cols, extend='both')
cont = iplt.contourf(pcubetotal, clevs, cmap=cmap, extend='both', norm=norm)
#cont = iplt.contourf(pcubetotal, cmap=cmap, extend='both')
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+divisor,divisor))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+divisor,divisor))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '$%d$')
cbar.set_label('$W m^{-2}$')
#cbar.set_label(pcubetotal.units, fontsize=10)
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%d}$' % i for i in ticks])
#main_title=pcubetotal.standard_name.title().replace('_',' ')
#main_title=('Total
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
plt.savefig('%s%s/%s/%s_%s_notitle.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
#plt.savefig('%s%s/%s/%s_%s.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
|
|
import cv2
import numpy as np
# Define color ranges in HSV with lower & upper ranges
# NOTE: HSV value range in OpenCV:
# H: 0 -> 180
# S: 0 -> 255
# V: 0 -> 255
#
# NOTE: In HSV, red wraps around the beginning and end of the hue range
#
# Major colors encompass a range of ~40 H values
# Minor colors encompass a range of ~20 H values
HSV_RANGES = {
# red is a major color
'red': [
{
'lower': np.array([0, 39, 64]),
'upper': np.array([20, 255, 255])
},
{
'lower': np.array([161, 39, 64]),
'upper': np.array([180, 255, 255])
}
],
# yellow is a minor color
'yellow': [
{
'lower': np.array([21, 39, 64]),
'upper': np.array([40, 255, 255])
}
],
# green is a major color
'green': [
{
'lower': np.array([41, 39, 64]),
'upper': np.array([80, 255, 255])
}
],
# cyan is a minor color
'cyan': [
{
'lower': np.array([81, 39, 64]),
'upper': np.array([100, 255, 255])
}
],
# blue is a major color
'blue': [
{
'lower': np.array([101, 39, 64]),
'upper': np.array([140, 255, 255])
}
],
# violet is a minor color
'violet': [
{
'lower': np.array([141, 39, 64]),
'upper': np.array([160, 255, 255])
}
],
# next are the monochrome ranges
    # black is all H & S values, but only roughly the lower 25% of V
'black': [
{
'lower': np.array([0, 0, 0]),
'upper': np.array([180, 255, 63])
}
],
    # gray is all H values, lower ~15% of S, & between ~25-89% of V
'gray': [
{
'lower': np.array([0, 0, 64]),
'upper': np.array([180, 38, 228])
}
],
# white is all H values, lower 15% of S, & upper 10% of V
'white': [
{
'lower': np.array([0, 0, 229]),
'upper': np.array([180, 38, 255])
}
]
}
def find_regions(
src_img,
target_img,
bg_colors=None,
pre_erode=0,
dilate=2,
min_area=0.5,
max_area=2.0
):
"""
Finds regions in source image that are similar to the target image.
Args:
src_img: 3-D NumPy array of pixels in HSV (source image)
target_img: 3-D NumPy array of pixels in HSV (target image)
bg_colors: list of color names to use for background colors, if
None the dominant color in the source image will be used
pre_erode: # of erosion iterations performed on masked image
prior to any dilation iterations
dilate: # of dilation iterations performed on masked image
        min_area: minimum area cutoff, as a fraction of the target
            region's area, for returning matching sub-regions
        max_area: maximum area cutoff, as a fraction of the target
            region's area, for returning matching sub-regions
Returns:
        List of OpenCV contours (NumPy point arrays), one per matching
        sub-region in the source image, keeping only regions whose area
        falls between min_area and max_area times the target region's area.
Raises:
tbd
"""
# if no bg colors are specified, determine dominant color range
# for the 'background' in the source image
if bg_colors is None:
bg_colors = [find_dominant_color(src_img)]
# determine # of pixels of each color range found in the target
target_color_profile = get_color_profile(target_img)
# find common color ranges in target (excluding the bg_colors)
feature_colors = get_common_colors(target_color_profile, bg_colors)
# create masks from feature colors
mask = create_mask(src_img, feature_colors)
target_mask = create_mask(target_img, feature_colors)
# define kernel used for erosion & dilation
kernel = np.ones((3, 3), np.uint8)
# erode masks
mask = cv2.erode(mask, kernel, iterations=pre_erode)
target_mask = cv2.erode(target_mask, kernel, iterations=pre_erode)
# dilate masks
mask = cv2.dilate(mask, kernel, iterations=dilate)
target_mask = cv2.dilate(target_mask, kernel, iterations=dilate)
# fill holes in mask using contours
mask = fill_holes(mask)
target_mask = fill_holes(target_mask)
# select largest blob from target mask
target_mask = filter_largest_blob(target_mask)
# determine target mask area
feature_area = np.sum(target_mask) / 255
# remove contours below min_area and above max_area
min_pixels = int(feature_area * min_area)
max_pixels = int(feature_area * max_area)
contours = filter_blobs_by_size(mask, min_pixels, max_pixels)
# return contours
return contours
def find_dominant_color(hsv_img):
"""
Finds dominant color in given HSV image array
Args:
hsv_img: HSV pixel data (3-D NumPy array)
Returns:
Text string for dominant color range (from HSV_RANGES keys)
Raises:
tbd
"""
color_profile = get_color_profile(hsv_img)
dominant_color = max(color_profile, key=lambda k: color_profile[k])
return dominant_color
def get_color_profile(hsv_img):
"""
Finds color profile as pixel counts for color ranges in HSV_RANGES
Args:
hsv_img: HSV pixel data (3-D NumPy array)
Returns:
        Dictionary mapping each color name in HSV_RANGES to its pixel count
Raises:
tbd
"""
h, s, v = get_hsv(hsv_img)
color_profile = {}
for color, color_ranges in HSV_RANGES.items():
color_profile[color] = 0
for color_range in color_ranges:
pixel_count = np.sum(
np.logical_and(
h >= color_range['lower'][0],
h <= color_range['upper'][0]
) &
np.logical_and(
s >= color_range['lower'][1],
s <= color_range['upper'][1]
) &
np.logical_and(
v >= color_range['lower'][2],
v <= color_range['upper'][2]
)
)
color_profile[color] += pixel_count
return color_profile
def get_hsv(hsv_img):
"""
Returns flattened hue, saturation, and values from given HSV image.
"""
hue = hsv_img[:, :, 0].flatten()
sat = hsv_img[:, :, 1].flatten()
val = hsv_img[:, :, 2].flatten()
return hue, sat, val
def get_common_colors(color_profile, bg_colors, prevalence=0.1):
"""
Finds colors in a color profile (excluding bg colors) that exceed prevalence
"""
total = sum(color_profile.values())
for bg_color in bg_colors:
total -= color_profile[bg_color]
common_colors = []
for color, count in color_profile.items():
if color in bg_colors:
continue
if count > prevalence * total:
common_colors.append(color)
return common_colors
def create_mask(hsv_img, colors):
"""
Creates a binary mask from HSV image using given colors.
"""
mask = np.zeros((hsv_img.shape[0], hsv_img.shape[1]), dtype=np.uint8)
for color in colors:
for color_range in HSV_RANGES[color]:
mask += cv2.inRange(
hsv_img,
color_range['lower'],
color_range['upper']
)
return mask
def fill_holes(mask):
"""
Fills holes in a given binary mask.
"""
ret, thresh = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)
new_mask, contours, hierarchy = cv2.findContours(
thresh,
cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_SIMPLE
)
for cnt in contours:
cv2.drawContours(new_mask, [cnt], 0, 255, -1)
return new_mask
def filter_largest_blob(mask):
"""
Filters a given binary mask for the largest blob
"""
ret, thresh = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)
mask, contours, hierarchy = cv2.findContours(
thresh,
cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_SIMPLE
)
max_size = 0
max_contour = None
new_mask = np.zeros(mask.shape, dtype=mask.dtype)
for c in contours:
c_area = cv2.contourArea(c)
if c_area > max_size:
max_size = c_area
max_contour = c
cv2.drawContours(new_mask, [max_contour], 0, 255, -1)
return new_mask
def filter_blobs_by_size(mask, min_pixels, max_pixels):
"""
Filters a given binary mask keeping blobs within a min & max size
"""
ret, thresh = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)
new_mask, contours, hierarchy = cv2.findContours(
thresh,
cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_SIMPLE
)
good_contours = []
for c in contours:
c_area = cv2.contourArea(c)
if min_pixels <= c_area <= max_pixels:
good_contours.append(c)
return good_contours
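# Minimal usage sketch (not part of the original module); the image file names
# below are hypothetical placeholders. find_regions() expects both images in
# HSV, so convert from OpenCV's default BGR color order first.
if __name__ == '__main__':
    # load a scene and a template of the region we want to locate
    src_bgr = cv2.imread('scene.png')
    target_bgr = cv2.imread('template.png')
    src_hsv = cv2.cvtColor(src_bgr, cv2.COLOR_BGR2HSV)
    target_hsv = cv2.cvtColor(target_bgr, cv2.COLOR_BGR2HSV)
    # contours of sub-regions whose color profile matches the template
    matches = find_regions(src_hsv, target_hsv)
    # draw the matches onto a copy of the source for visual inspection
    preview = src_bgr.copy()
    cv2.drawContours(preview, matches, -1, (0, 255, 0), 2)
    cv2.imwrite('matches.png', preview)
    print('found %d matching region(s)' % len(matches))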
|
|
# script to publish a level
import urlparse, urllib, datetime, os.path, hashlib, sys, getpass, uuid, zipfile
import xml.etree.ElementTree as ET
# usage:
# python2.7 publish.py <rss_feed> <level_dir>
# The script expects a manifest.xml file in the level dir, that is a stripped-down item entry for the rss feed (i.e. minus the automatically generated parts). If no manifest exists a template will be generated,
# and if no rss feed exists a template for that will be generated as well.
# Both should be edited before publishing. You need to change the descriptions, names and urls. You can change the guid as well if you don't want a random one, but it should stay unchanged after you publish a
# level.
# You will then have to upload the resulting .rss and .zip files to your web space.
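# For illustration only (not emitted verbatim by this script): after the
# enclosure length, hash and pubDate have been filled in, a manifest item ends
# up looking roughly like the example below; every name and URL here is a
# placeholder.
#
#   <item>
#     <title>My Level Pack</title>
#     <link>http://example.org/levels.html#mypack</link>
#     <description>Three new jungle levels.</description>
#     <media:credit role="author">jonny</media:credit>
#     <guid>urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx</guid>
#     <enclosure type="application/x-hurrican-level"
#                url="http://example.org/mypack.zip" length="123456"/>
#     <media:hash algo="sha-1">(40-hex-digit sha-1 of the zip)</media:hash>
#     <pubDate>Mon, 01 Jan 2018 12:00:00 +0000</pubDate>
#   </item>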
RFC822format = "%a, %d %b %Y %H:%M:%S +0000"
# elementtree should really get a built-in pretty-print
def indent(elem, level=0):
i = "\n" + level * "\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def main(*argv):
utcnow = datetime.datetime.utcnow().strftime(RFC822format)
rssPath = os.path.abspath(argv[1])
levelPath = os.path.abspath(argv[2])
ET.register_namespace("media", "http://search.yahoo.com/mrss/") # requires Python2.7. Sorry.
# check if we need to publish a directory or a zip file
if os.path.isdir(levelPath):
# level pack is a directory
# minimal sanity check to detect a hurrican level. I could probably check that all levels referenced in levellist and all textures are present.
if not os.path.exists(os.path.join(levelPath, "levellist.dat")):
print "Not a hurrican level at %s" % levelPath
return
manifestPath = os.path.join(levelPath, "manifest.xml")
# process manifest or create template
dirty = False
if os.path.exists(manifestPath):
print "Reading manifest %s ..." % os.path.basename(manifestPath)
item = ET.parse(manifestPath).getroot()
else:
print "Creating manifest %s ..." % os.path.basename(manifestPath)
item = ET.Element("item")
dirty = True
title = item.find("title")
if title == None:
title = ET.SubElement(item, "title").text = "name of your level"
# title for level base
dirty = True
link = item.find("link")
if link == None:
link = ET.SubElement(item, "link").text = "http://link.to.your.website/#level_description"
# link to further information about your level
dirty = True
description = item.find("description")
if description == None:
description = ET.SubElement(item, "description").text = "short Description of your level"
            # a short description of your level
dirty = True
credit = item.find("{http://search.yahoo.com/mrss/}credit")
if credit == None:
credit = ET.SubElement(item, "{http://search.yahoo.com/mrss/}credit", role="author").text = getpass.getuser()
# Your name. You probably want to change it to something more different from your login name.
dirty = True
guid = item.find("guid")
if guid == None:
guid = ET.SubElement(item, "guid").text = uuid.uuid4().urn
# This is assigned once and should not change over the lifetime of a level.
# If you don't want a random guid you can change to something else, but it must be unique! You shouldn't change it after publishing a level to the world, because it may confuse downloaders!
dirty = True
enclosure = item.find("enclosure")
if enclosure == None:
zipPath = levelPath + ".zip"
zipName = os.path.basename(zipPath)
enclosure = ET.SubElement(item, "enclosure", type="application/x-hurrican-level", url=urlparse.urljoin("http://link.to.your.website/", zipName))
# url of where you'll upload the zip file of the level to. You're free to rename the zip file.
dirty = True
else:
enclosure.set("type", "application/x-hurrican-level")
zipRemote = urllib.unquote(urlparse.urlparse(enclosure.get("url")).path)
zipName = os.path.basename(zipRemote)
zipPath = os.path.join(os.path.dirname(levelPath), zipName);
if dirty:
print "Writing manifest %s ..." % os.path.basename(manifestPath)
print "You need to modify it before publishing it."
indent(item) # not really required, but nice for debugging
tree = ET.ElementTree(item)
tree.write(manifestPath, encoding="utf-8", xml_declaration=True)
# create zip file
print "Creating zip file %s ..." % os.path.basename(zipPath)
if os.path.exists(zipPath):
os.rename(zipPath, zipPath + "~")
zip = zipfile.ZipFile(zipPath, "w", zipfile.ZIP_DEFLATED, True)
for f in os.listdir(levelPath):
if f[0] != ".": # exclude hidden files, mostly SCM or .DS_Store files.
zip.write(os.path.join(levelPath, f), f)
zip.close()
else:
# level pack is a zip file
zipPath = levelPath
print "Opening zip %s ..." % zipPath
zip = zipfile.ZipFile(zipPath)
try:
zip.getinfo("levellist.dat")
except KeyError:
print "the archive doesn't seem to contain a hurrican level"
return
# we assume that the manifest is mostly correct. It would be possible to write one back into the zip, but then the user
# would have to unpack the zip for verification/correction.
print "Reading manifest %s ..." % "manifest.xml"
f = zip.open("manifest.xml")
item = ET.parse(f).getroot()
f.close()
enclosure = item.find("enclosure")
guid = item.find("guid")
print "closing zip %s ..." % zipPath
zip.close()
# get hash digest and length:
print "Calculating sha-1 hash ..."
f = open(zipPath, "rb")
data = f.read()
f.close()
sha1 = hashlib.sha1(data).hexdigest()
# store in item description
enclosure.set("length", str(len(data)))
hash = item.find("{http://search.yahoo.com/mrss/}hash")
if hash == None:
hash = ET.SubElement(item, "{http://search.yahoo.com/mrss/}hash", algo="sha-1").text = sha1
else:
hash.set("algo", "sha-1")
hash.text = sha1
pubDate = item.find("pubDate")
if pubDate == None:
pubDate = ET.SubElement(item, "pubDate").text = utcnow
else:
pubDate.text = utcnow
# process rss feed or create template
    # The channel header is editable, but items may be replaced.
if os.path.exists(rssPath):
print "Reading feed %s ..." % os.path.basename(rssPath)
rss = ET.parse(rssPath).getroot()
else:
print "Creating feed %s ..." % os.path.basename(rssPath)
print "You need to modify it before publishing it."
rss = ET.Element("rss", version="2.0")
channel = rss.find("channel")
if channel == None:
channel = ET.SubElement(rss, "channel")
title = channel.find("title")
if title == None:
title = ET.SubElement(channel, "title").text = "name of your feed"
# title for your level feed. Maybe something like "Jonny's cool levels".
link = channel.find("link")
if link == None:
link = ET.SubElement(channel, "link").text = "http://link.to.your.website/"
# link to website with further information about your levels.
description = channel.find("description")
if description == None:
description = ET.SubElement(channel, "description").text = "description of your feed"
# a short description of your level feed. What about "Cool levels for Hurrican!!!"?
lastBuildDate = channel.find("lastBuildDate")
ttl = channel.find("ttl")
if ttl == None:
ttl = ET.SubElement(channel, "ttl").text = "%i" % (7 * 24 * 60)
# time in minutes until the next check
# this is not actually checked by the hurrican launcher at the moment, but could be used to cut down traffic to the rss feed in the future.
# update/add build date
if lastBuildDate == None:
lastBuildDate = ET.SubElement(channel, "lastBuildDate")
pubDate = channel.find("pubDate")
if pubDate == None:
pubDate = ET.SubElement(channel, "pubDate")
lastBuildDate.text = utcnow
pubDate.text = utcnow
# find if we need to remove an old item
itemlist = channel.findall("item")
for feedItem in itemlist:
feedGuid = feedItem.find("guid")
if feedGuid != None:
if feedGuid.text == guid.text:
channel.remove(feedItem)
# and append the new item
channel.append(item)
# wrap it in an ElementTree instance and save as XML
print "Writing feed %s ..." % os.path.basename(rssPath)
indent(rss) # not really required, but nice for debugging
tree = ET.ElementTree(rss)
tree.write(rssPath, encoding="utf-8", xml_declaration=True)
print "Done!"
if __name__=="__main__":
main(*sys.argv)
|
|
########
##
## The arena which runs tournaments of bots
##
########
import random
import bot_player as bp
import tournament_results as tr
import morality_calculator as mc
class Arena(object):
"""
Hosts tournaments of bots
"""
def __init__(self):
pass
def generate_interaction_lengths(self, w, numMeetings):
"""
Based on a probability of continuing each step, generate
interaction lengths for the bot pairs
ARGS:
- w: probability of interaction continuing at each step
- numMeetings: number of interaction_lengths needed to be
generated
RETURNS:
- interaction_lengths: a list of integers representing how
long each meeting between bots will be (if the list is n
long, it is because each bot pair meets n times)
"""
interaction_lengths = []
i = 0
while i < numMeetings:
meeting_length = 1
while True:
r = random.random()
if r > w:
break
else:
meeting_length += 1
interaction_lengths.append(meeting_length)
i += 1
return interaction_lengths
def bot_interaction(self, bot1, bot2, interaction_length,
payoffs={'T': 5,'R': 3,'P': 1,'S': 0}, w=0.995):
"""
Two bots paired together interacting
ARGS:
- bot1, bot2: instances of BotPlayer (presumably subclasses
of BotPlayer), representing the two participating bots
- interaction_length: how many turns bot1 and bot2 play in
this interaction
RETURNS:
- past_moves: list of every move that occurred during the
interaction
"""
past_moves_1 = []
past_moves_2 = []
i = 0
while i < interaction_length:
bot1_move = bot1.getNextMove(past_moves_1,
payoffs=payoffs, w=w)
bot2_move = bot2.getNextMove(past_moves_2,
payoffs=payoffs, w=w)
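            # each bot records the history from its own point of view:
            # (its own move first, the opponent's move second)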
next_moves_1 = (bot1_move, bot2_move)
next_moves_2 = (bot2_move, bot1_move)
past_moves_1.append(next_moves_1)
past_moves_2.append(next_moves_2)
i += 1
return past_moves_1
def validate_tournament_inputs(self, botList, numMeetings, payoffs, w):
"""
Make sure the inputs to runTournament make sense and if they do not,
say why in the list 'errors'
ARGS:
- botList: list of bots to participate in the tournament
- w: probability of interaction continuing at each step
- numMeetings: number of times each bot is paired with each
other bot
- payoffs: defines the scores for each Prisoner's Dilemma situation
RETURNS:
            - errors: list of error messages to let the user know what is wrong
with the inputs, if anything
"""
errors = []
# botList has to be a list of BotPlayer instances
for bot in botList:
if not isinstance(bot, bp.BotPlayer):
errors.append("botList must be a list of BotPlayer objects")
break
if int(numMeetings) != numMeetings:
errors.append("numMeetings must represent an integer")
if numMeetings < 1:
errors.append("numMeetings must be at least 1")
if not (payoffs['T'] > payoffs['R'] > payoffs['P'] > payoffs['S']):
errors.append("payoffs must obey T > R > P > S")
if not (2*payoffs['R'] > payoffs['T'] + payoffs['S']):
errors.append("payoffs must obey 2*R > T + S")
if not (0 < w < 1):
errors.append("w must be a number between 0 and 1")
return errors
def runTournament(self, botList, numMeetings,
payoffs={'T':5,'R':3,'P':1,'S':0}, w=0.995):
"""
Main method, partners each bot with each other bot with
w probability of ending each turn (length of interactions
is determined (using w) before any pairings, so all
pairings use the same list of interaction lengths)
ARGS:
- botList: list of bots to participate in the tournament
- w: probability of interaction continuing at each step
- numMeetings: number of times each bot is paired with each
other bot
- payoffs: defines the scores for each Prisoner's Dilemma situation
RETURNS:
- tourney_res: TournamentResults object with all the info
"""
# validate inputs
error_messages =\
self.validate_tournament_inputs(botList, numMeetings, payoffs, w)
if error_messages:
print(error_messages)
return -1
# dictionary of interactions to pass to TournamentResults
interactions = {}
# determine length of each interaction based on w
interaction_lengths =\
self.generate_interaction_lengths(w, numMeetings)
# assign each bot a tournament id number
for t_id, bot in enumerate(botList):
bot.tournament_id = t_id
# pair each bot with each other bot and save the results
num_bots = len(botList)
for i in xrange(num_bots):
for j in xrange(i, num_bots):
bot1 = botList[i]
bot2 = botList[j]
meeting_results_list = []
for m in xrange(numMeetings):
interaction_length = interaction_lengths[m]
meeting_results =\
self.bot_interaction(bot1, bot2, interaction_length,\
payoffs=payoffs, w=w)
meeting_results_list.append(meeting_results)
interactions[(bot1.tournament_id, bot2.tournament_id)] =\
meeting_results_list
tourney_res = tr.TournamentResults(botList, interactions, payoffs)
return tourney_res
## TODO: add capability for error/noise
## TODO: extend to ecological (evolutionary) environment
if __name__ == "__main__":
import the_bots
a = Arena()
#----------#
num_meetings = 5
b1 = the_bots.ALL_D()
b2 = the_bots.ALL_C()
b3 = the_bots.RANDOM(p_cooperate=0.5)
b4 = the_bots.PAVLOV()
b5 = the_bots.TIT_FOR_TAT()
b6 = the_bots.TIT_FOR_TWO_TATS()
b7 = the_bots.TWO_TITS_FOR_TAT()
b8 = the_bots.SUSPICIOUS_TIT_FOR_TAT()
b9 = the_bots.GENEROUS_TIT_FOR_TAT(p_generous=0.1)
b10 = the_bots.GENEROUS_TIT_FOR_TAT(p_generous=0.3)
b11 = the_bots.JOSS(p_sneaky=0.1)
b12 = the_bots.JOSS(p_sneaky=0.3)
b13 = the_bots.MAJORITY(soft=True)
b14 = the_bots.MAJORITY(soft=False)
b15 = the_bots.TESTER()
b16 = the_bots.FRIEDMAN()
b17 = the_bots.EATHERLY()
b18 = the_bots.CHAMPION()
b19 = the_bots.RANDOM(p_cooperate=0.8)
b20 = the_bots.RANDOM(p_cooperate=0.2)
bot_list = [b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13,\
b14, b15, b16, b17, b18, b19, b20]
t = a.runTournament(bot_list, num_meetings)
print(t)
mc = mc.MoralityCalculator(t)
print(mc)
#----------#
|
|
from .models import Users
from .conftest import USER_CREDENTIALS
import pytest
PRIVATE_ROUTES = [
('/search'),
('/add/{name}/{id}'),
('/delete/{sym}'),
('/portfolio'),
('/details/{sym}'),
]
PUBLIC_ROUTES = [
('/login'),
('/about'),
('/new_user')
]
@pytest.mark.parametrize('route', PUBLIC_ROUTES)
def test_login_view_is_public(route, app):
    '''Test that an unauthorized user can access public routes.'''
response = app.get(route, status='2*')
assert response.status_code == 200
@pytest.mark.parametrize('route', PRIVATE_ROUTES)
def test_no_access_to_private_views_if_no_auth(route, app):
"""Test that unathurized user can't access private routes."""
response = app.get(route, status='4*')
assert response.status_code == 403
def test_user_db_exists(new_session):
    '''Tests that the user database is being set up'''
assert len(new_session.query(Users).all()) == 0
def test_user_gets_added_to_db(new_session):
    '''Tests that a user can be added to the database'''
user = Users(username=USER_CREDENTIALS['username'], pass_hash='hashiehas')
new_session.add(user)
new_session.flush()
assert len(new_session.query(Users).all()) == 1
def test_login_correct_user_info(app_and_csrf_token, populated_db):
    '''Checks the POST form submission with correct user login data'''
app, token = app_and_csrf_token
auth_data = {'username': 'fake',
'password': 'fake',
'csrf_token': token}
response = app.post('/login', auth_data, status='3*')
assert response.status_code == 302
def test_login_bad_user_info(app_and_csrf_token, populated_db):
    '''Checks the POST form submission with incorrect user login data'''
app, token = app_and_csrf_token
auth_data = {'username': 'fake',
'password': 'not the correct password',
'csrf_token': token}
response = app.post('/login', auth_data, status='2*')
assert b"Username or Password Not Recognized" in response.body
def test_logout(app):
'''Tests that the logout page re-routes the user'''
response = app.get('/logout', status="3*")
assert response.status_code == 302
def test_home_public(app):
    '''Test that the home view is publicly accessible'''
response = app.get('/', status="3*")
assert response.status_code == 302
def test_home_rederects_to_portfolio(auth_app, populated_db):
'''Tests that the "/" route re-routes the user'''
response = auth_app.get('/', status='3*')
assert response.status_code == 302
def test_admin_is_private(app):
    '''Test that the admin page is not accessible if you are not logged in'''
response = app.get('/admin', status='4*')
assert response.status_code == 403
def test_admin_accessable_to_adim(admin_app, populated_db_admin):
    '''Testing that the admin view is accessible to admin users'''
app, token = admin_app
response = app.get('/admin', status='2*')
assert response.status_code == 200
def test_admin_not_accessable_to_non_admin(auth_app, populated_db):
    '''Testing that the admin view is not accessible to non-admin users'''
response = auth_app.get('/admin', status='4*')
assert response.status_code == 403
def test_admin_delet_post_request_csrf(admin_app, populated_db):
'''Checks admin view POST for correct CSRF Token'''
app, token = admin_app
auth_data = {'username': 'fake',
'csrf_token': token}
response = app.post('/admin', auth_data, status='2*')
assert response.status_code == 200
def test_new_user_is_public(app):
'''Checking that the new_user view is public'''
response = app.get('/new_user', status="2*")
assert response.status_code == 200
def test_new_user_post_missing_field(app_and_csrf_token, populated_db):
    '''Checks the new_user POST form with a missing required field'''
app, token = app_and_csrf_token
auth_data = {'username': '',
'password': 'fdsafdsa',
'password_verify': 'fdsafda',
'last_name': 'Smith',
'first_name': 'Zeek',
'email': 'fdsa@fdafd',
'phone_number': '43232423423',
'csrf_token': token}
response = app.post('/new_user', auth_data, status='2*')
assert b"Missing Required Fields" in response.body
def test_new_user_post_passwords_not_match(app_and_csrf_token, populated_db):
    '''Checks the new_user POST form when the passwords do not match'''
app, token = app_and_csrf_token
auth_data = {'username': 'jkfdajkfda',
'password': 'fakefakefake',
'password_verify': 'fdsafda',
'last_name': 'Smith',
'first_name': 'Zeek',
'email': 'fdsa@fdafd',
'phone_number': '43232423423',
'csrf_token': token}
response = app.post('/new_user', auth_data, status='2*')
assert b'Passwords do not match or password'\
b' is less than 6 characters' in response.body
def test_new_user_post_short_password(app_and_csrf_token, populated_db):
    '''Checks the new_user POST form when the password is too short'''
app, token = app_and_csrf_token
auth_data = {'username': 'jkfdajkfda',
'password': 'fake',
'password_verify': 'fake',
'last_name': 'Smith',
'first_name': 'Zeek',
'email': 'fdsa@fdafd',
'phone_number': '43232423423',
'csrf_token': token}
response = app.post('/new_user', auth_data, status='2*')
assert b'Passwords do not match or password'\
b' is less than 6 characters' in response.body
def test_new_user_post_user_name_exists(app_and_csrf_token, populated_db):
    '''Checks the new_user POST form when the username already exists'''
app, token = app_and_csrf_token
auth_data = {'username': 'fake',
'password': 'fakefake',
'password_verify': 'fakefake',
'last_name': 'Smith',
'first_name': 'Zeek',
'email': 'fdsa@fdafd',
'phone_number': '43232423423',
'csrf_token': token}
response = app.post('/new_user', auth_data, status='2*')
assert b'already exists.' in response.body
def test_new_user_post_valid(app_and_csrf_token, populated_db):
    '''Checks the new_user POST form with valid data'''
app, token = app_and_csrf_token
auth_data = {'username': 'fdsafdsafdsafsa',
'password': 'fakefake',
'password_verify': 'fakefake',
'last_name': 'Smith',
'first_name': 'Zeek',
'email': 'fdsa@fdafd',
'phone_number': '43232423423',
'csrf_token': token}
response = app.post('/new_user', auth_data, status='3*')
assert response.status_code == 302
def test_admin_delete_user_returns_message(admin_app, populated_db):
'''
Checks admin view DELETE button POST returns to admin with
    confirmation message
'''
app, token = admin_app
auth_data = {'username': 'fake',
'csrf_token': token}
response = app.post('/admin', auth_data, status='2*')
assert b'Are you sure you want to delete user fake?' in response.body
def test_admin_cancel_delete_user_returns_message(admin_app, populated_db):
'''
Checks admin view CANCEL button POST returns to admin without
    confirmation message
'''
app, token = admin_app
auth_data = {'username': 'CANCEL',
'csrf_token': token}
response = app.post('/admin', auth_data, status='2*')
assert b'Are you sure you want to delete user?' not in response.body
|
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import itertools
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import exceptions as lib_exc
from six import add_metaclass
from six import with_metaclass
from sqlalchemy import and_
from neutron._i18n import _
from neutron.common import exceptions as n_exc
from neutron.db import _utils as db_utils
from neutron.db import rbac_db_mixin
from neutron.db import rbac_db_models as models
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base
from neutron.objects.db import api as obj_db_api
@add_metaclass(abc.ABCMeta)
class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
base.NeutronDbObject):
rbac_db_cls = None
@classmethod
@abc.abstractmethod
def get_bound_tenant_ids(cls, context, obj_id):
"""Returns ids of all tenants depending on this db object.
Has to be implemented by classes using RbacNeutronMetaclass.
The tenants are the ones that need the sharing or 'visibility' of the
object to them. E.g: for QosPolicy that would be the tenants using the
Networks and Ports with the shared QosPolicy applied to them.
:returns: set -- a set of tenants' ids dependent on this object.
"""
@staticmethod
def is_network_shared(context, rbac_entries):
# NOTE(korzen) this method is copied from db_base_plugin_common.
# The shared attribute for a network now reflects if the network
# is shared to the calling tenant via an RBAC entry.
matches = ('*',) + ((context.tenant_id,) if context else ())
for entry in rbac_entries:
if (entry.action == models.ACCESS_SHARED and
entry.target_tenant in matches):
return True
return False
@staticmethod
def get_shared_with_tenant(context, rbac_db_cls, obj_id, tenant_id):
        # NOTE(korzen) This method makes it possible to query within an
        # already-started session
rbac_db_model = rbac_db_cls.db_model
return (db_utils.model_query(context, rbac_db_model).filter(
and_(rbac_db_model.object_id == obj_id,
rbac_db_model.action == models.ACCESS_SHARED,
rbac_db_model.target_tenant.in_(
['*', tenant_id]))).count() != 0)
@classmethod
def is_shared_with_tenant(cls, context, obj_id, tenant_id):
ctx = context.elevated()
with cls.db_context_reader(ctx):
return cls.get_shared_with_tenant(ctx, cls.rbac_db_cls,
obj_id, tenant_id)
@classmethod
def is_accessible(cls, context, db_obj):
return (super(
RbacNeutronDbObjectMixin, cls).is_accessible(context, db_obj) or
cls.is_shared_with_tenant(context, db_obj.id,
context.tenant_id))
@classmethod
def _get_db_obj_rbac_entries(cls, context, rbac_obj_id, rbac_action):
rbac_db_model = cls.rbac_db_cls.db_model
return db_utils.model_query(context, rbac_db_model).filter(
and_(rbac_db_model.object_id == rbac_obj_id,
rbac_db_model.action == rbac_action))
@classmethod
def _get_tenants_with_shared_access_to_db_obj(cls, context, obj_id):
rbac_db_model = cls.rbac_db_cls.db_model
return set(itertools.chain.from_iterable(context.session.query(
rbac_db_model.target_tenant).filter(
and_(rbac_db_model.object_id == obj_id,
rbac_db_model.action == models.ACCESS_SHARED,
rbac_db_model.target_tenant != '*'))))
@classmethod
def _validate_rbac_policy_delete(cls, context, obj_id, target_tenant):
ctx_admin = context.elevated()
rb_model = cls.rbac_db_cls.db_model
bound_tenant_ids = cls.get_bound_tenant_ids(ctx_admin, obj_id)
db_obj_sharing_entries = cls._get_db_obj_rbac_entries(
ctx_admin, obj_id, models.ACCESS_SHARED)
def raise_policy_in_use():
raise ext_rbac.RbacPolicyInUse(
object_id=obj_id,
details='tenant_id={}'.format(target_tenant))
if target_tenant != '*':
# if there is a wildcard rule, we can return early because it
# shares the object globally
wildcard_sharing_entries = db_obj_sharing_entries.filter(
rb_model.target_tenant == '*')
if wildcard_sharing_entries.count():
return
if target_tenant in bound_tenant_ids:
raise_policy_in_use()
return
# for the wildcard we need to query all of the rbac entries to
# see if any allow the object sharing
other_target_tenants = cls._get_tenants_with_shared_access_to_db_obj(
ctx_admin, obj_id)
if not bound_tenant_ids.issubset(other_target_tenants):
raise_policy_in_use()
@classmethod
def validate_rbac_policy_delete(cls, resource, event, trigger, context,
object_type, policy, **kwargs):
"""Callback to handle RBAC_POLICY, BEFORE_DELETE callback.
:raises: RbacPolicyInUse -- in case the policy is in use.
"""
if policy['action'] != models.ACCESS_SHARED:
return
target_tenant = policy['target_tenant']
db_obj = obj_db_api.get_object(
cls, context.elevated(), id=policy['object_id'])
if db_obj.tenant_id == target_tenant:
return
cls._validate_rbac_policy_delete(context=context,
obj_id=policy['object_id'],
target_tenant=target_tenant)
@classmethod
def validate_rbac_policy_update(cls, resource, event, trigger, context,
object_type, policy, **kwargs):
"""Callback to handle RBAC_POLICY, BEFORE_UPDATE callback.
:raises: RbacPolicyInUse -- in case the update is forbidden.
"""
prev_tenant = policy['target_tenant']
new_tenant = kwargs['policy_update']['target_tenant']
if prev_tenant == new_tenant:
return
if new_tenant != '*':
return cls.validate_rbac_policy_delete(
resource, event, trigger, context, object_type, policy)
@classmethod
def validate_rbac_policy_change(cls, resource, event, trigger, context,
object_type, policy, **kwargs):
"""Callback to validate RBAC_POLICY changes.
This is the dispatching function for create, update and delete
callbacks. On creation and update, verify that the creator is an admin
or owns the resource being shared.
"""
# TODO(hdaniel): As this code was shamelessly stolen from
# NeutronDbPluginV2.validate_network_rbac_policy_change(), those pieces
# should be synced and contain the same bugs, until Network RBAC logic
# (hopefully) melded with this one.
if object_type != cls.rbac_db_cls.db_model.object_type:
return
db_obj = obj_db_api.get_object(
cls, context.elevated(), id=policy['object_id'])
if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
if (not context.is_admin and
db_obj['tenant_id'] != context.tenant_id):
msg = _("Only admins can manipulate policies on objects "
"they do not own")
raise lib_exc.InvalidInput(error_message=msg)
callback_map = {events.BEFORE_UPDATE: cls.validate_rbac_policy_update,
events.BEFORE_DELETE: cls.validate_rbac_policy_delete}
if event in callback_map:
return callback_map[event](resource, event, trigger, context,
object_type, policy, **kwargs)
def attach_rbac(self, obj_id, project_id, target_tenant='*'):
obj_type = self.rbac_db_cls.db_model.object_type
rbac_policy = {'rbac_policy': {'object_id': obj_id,
'target_tenant': target_tenant,
'project_id': project_id,
'object_type': obj_type,
'action': models.ACCESS_SHARED}}
return self.create_rbac_policy(self.obj_context, rbac_policy)
def update_shared(self, is_shared_new, obj_id):
admin_context = self.obj_context.elevated()
shared_prev = obj_db_api.get_object(self.rbac_db_cls, admin_context,
object_id=obj_id,
target_tenant='*',
action=models.ACCESS_SHARED)
is_shared_prev = bool(shared_prev)
if is_shared_prev == is_shared_new:
return
# 'shared' goes False -> True
if not is_shared_prev and is_shared_new:
self.attach_rbac(obj_id, self.obj_context.tenant_id)
return
# 'shared' goes True -> False is actually an attempt to delete
# rbac rule for sharing obj_id with target_tenant = '*'
self._validate_rbac_policy_delete(self.obj_context, obj_id, '*')
return self.obj_context.session.delete(shared_prev)
def _update_post(self, obj_changes):
if "shared" in obj_changes:
self.update_shared(self.shared, self.id)
def _update_hook(self, update_orig):
with self.db_context_writer(self.obj_context):
# NOTE(slaweq): copy of object changes is required to pass it later to
# _update_post method because update() will reset all those changes
obj_changes = self.obj_get_changes()
update_orig(self)
_update_post(self, obj_changes)
def _create_post(self):
if self.shared:
self.attach_rbac(self.id, self.project_id)
def _create_hook(self, orig_create):
with self.db_context_writer(self.obj_context):
orig_create(self)
_create_post(self)
def _to_dict_hook(self, to_dict_orig):
dct = to_dict_orig(self)
if self.obj_context:
dct['shared'] = self.is_shared_with_tenant(self.obj_context,
self.id,
self.obj_context.tenant_id)
else:
# most OVO objects on an agent will not have a context set on the
# object because they will be generated from obj_from_primitive.
dct['shared'] = False
return dct
class RbacNeutronMetaclass(type):
"""Adds support for RBAC in NeutronDbObjects.
Injects code for CRUD operations and modifies existing ops to do so.
"""
@classmethod
def _get_attribute(mcs, attribute_name, bases):
for b in bases:
attribute = getattr(b, attribute_name, None)
if attribute:
return attribute
@classmethod
def get_attribute(mcs, attribute_name, bases, dct):
return (dct.get(attribute_name, None) or
mcs._get_attribute(attribute_name, bases))
@classmethod
def update_synthetic_fields(mcs, bases, dct):
if not dct.get('synthetic_fields', None):
synthetic_attr = mcs.get_attribute('synthetic_fields', bases, dct)
dct['synthetic_fields'] = synthetic_attr or []
if 'shared' in dct['synthetic_fields']:
raise n_exc.ObjectActionError(
action=_('shared attribute switching to synthetic'),
reason=_('already a synthetic attribute'))
dct['synthetic_fields'].append('shared')
@staticmethod
def subscribe_to_rbac_events(class_instance):
for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE,
events.BEFORE_DELETE):
registry.subscribe(class_instance.validate_rbac_policy_change,
resources.RBAC_POLICY, e)
@staticmethod
def validate_existing_attrs(cls_name, dct):
if 'shared' not in dct['fields']:
raise KeyError(_('No shared key in %s fields') % cls_name)
if 'rbac_db_cls' not in dct:
raise AttributeError(_('rbac_db_cls not found in %s') % cls_name)
@staticmethod
def get_replaced_method(orig_method, new_method):
def func(self):
return new_method(self, orig_method)
return func
@classmethod
def replace_class_methods_with_hooks(mcs, bases, dct):
methods_replacement_map = {'create': _create_hook,
'update': _update_hook,
'to_dict': _to_dict_hook}
for orig_method_name, new_method in methods_replacement_map.items():
orig_method = mcs.get_attribute(orig_method_name, bases, dct)
hook_method = mcs.get_replaced_method(orig_method,
new_method)
dct[orig_method_name] = hook_method
def __new__(mcs, name, bases, dct):
mcs.validate_existing_attrs(name, dct)
mcs.update_synthetic_fields(bases, dct)
mcs.replace_class_methods_with_hooks(bases, dct)
cls = type(name, (RbacNeutronDbObjectMixin,) + bases, dct)
cls.add_extra_filter_name('shared')
mcs.subscribe_to_rbac_events(cls)
return cls
NeutronRbacObject = with_metaclass(RbacNeutronMetaclass, base.NeutronDbObject)
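# A minimal usage sketch (all names below are hypothetical, not part of this
# module): a NeutronDbObject subclass opts into RBAC by deriving from
# NeutronRbacObject, pointing rbac_db_cls at its per-object RBAC OVO class,
# declaring a boolean 'shared' field and implementing get_bound_tenant_ids().
#
#     class MyPolicyRBAC(base.NeutronDbObject):
#         # OVO wrapping the object's *_rbacs table (object_id, target_tenant,
#         # action, ...); its db_model.object_type names the resource.
#         ...
#
#     class MyPolicy(NeutronRbacObject):
#         rbac_db_cls = MyPolicyRBAC
#         fields = {
#             # 'shared' is turned into a synthetic field by the metaclass
#             'shared': obj_fields.BooleanField(default=False),
#             ...
#         }
#
#         @classmethod
#         def get_bound_tenant_ids(cls, context, obj_id):
#             # return the set of tenant ids currently consuming obj_id,
#             # e.g. owners of ports/networks the policy is attached to
#             return set()
#
# The metaclass then wraps create/update/to_dict with the hooks defined above
# and subscribes the class to the RBAC_POLICY BEFORE_* callbacks.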
|
|
# Copyright 2013 Mirantis Inc.
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
from unittest import mock
import django
from django import http
from django.urls import reverse
from horizon import exceptions
from openstack_auth import policy
from openstack_dashboard import api as dash_api
from troveclient import common
from trove_dashboard import api
from trove_dashboard.content.databases import forms
from trove_dashboard.content.databases import tables
from trove_dashboard.content.databases import views
from trove_dashboard.content.databases.workflows import create_instance
from trove_dashboard.test import helpers as test
from trove_dashboard.utils import common as common_utils
INDEX_URL = reverse('horizon:project:databases:index')
LAUNCH_URL = reverse('horizon:project:databases:launch')
DETAILS_URL = reverse('horizon:project:databases:detail', args=['id'])
class DatabaseTests(test.TestCase):
@test.create_mocks(
{api.trove: ('instance_get', 'instance_list', 'flavor_list')})
def test_index(self):
# Mock database instances
databases = common.Paginated(self.databases.list())
self.mock_instance_list.return_value = databases
# Mock flavors
self.mock_flavor_list.return_value = self.flavors.list()
res = self.client.get(INDEX_URL)
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
replica_id = databases[-1].replicas[0]['id']
self.mock_instance_get.assert_called_once_with(test.IsHttpRequest(),
replica_id)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/databases/index.html')
# Check the Host column displaying ip or hostname
self.assertContains(res, '10.0.0.3')
self.assertContains(res, 'trove.instance-2.com')
@test.create_mocks(
{api.trove: ('instance_get', 'instance_list', 'flavor_list')})
def test_index_flavor_exception(self):
# Mock database instances
databases = common.Paginated(self.databases.list())
self.mock_instance_list.return_value = databases
# Mock flavors
self.mock_flavor_list.side_effect = self.exceptions.trove
res = self.client.get(INDEX_URL)
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
replica_id = databases[-1].replicas[0]['id']
self.mock_instance_get.assert_called_once_with(test.IsHttpRequest(),
replica_id)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_mocks(
{api.trove: ('instance_get', 'instance_list',)})
def test_index_list_exception(self):
# Mock database instances
self.mock_instance_list.side_effect = self.exceptions.trove
res = self.client.get(INDEX_URL)
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_mocks(
{api.trove: ('instance_get', 'instance_list', 'flavor_list')})
def test_index_pagination(self):
# Mock database instances
databases = self.databases.list()
last_record = databases[-1]
databases = common.Paginated(databases, next_marker="foo")
self.mock_instance_list.return_value = databases
# Mock flavors
self.mock_flavor_list.return_value = self.flavors.list()
res = self.client.get(INDEX_URL)
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
replica_id = databases[-1].replicas[0]['id']
self.mock_instance_get.assert_called_once_with(test.IsHttpRequest(),
replica_id)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertContains(
res, 'marker=' + last_record.id)
@test.create_mocks(
{api.trove: ('instance_get', 'instance_list', 'flavor_list')})
def test_index_flavor_list_exception(self):
# Mocking instances.
databases = common.Paginated(self.databases.list())
self.mock_instance_list.return_value = databases
# Mock the flavor list call to raise an exception.
self.mock_flavor_list.side_effect = self.exceptions.trove
res = self.client.get(INDEX_URL)
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
replica_id = databases[-1].replicas[0]['id']
self.mock_instance_get.assert_called_once_with(test.IsHttpRequest(),
replica_id)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_mocks({
api.trove: ('backup_list', 'configuration_list', 'datastore_flavors',
'datastore_list', 'datastore_version_list', 'flavor_list',
'instance_list'),
dash_api.cinder: ('volume_type_list',),
dash_api.neutron: ('network_list',),
dash_api.nova: ('availability_zone_list',),
policy: ('check',),
})
def test_launch_instance(self):
self.mock_check.return_value = True
self.mock_datastore_flavors.return_value = self.flavors.list()
self.mock_backup_list.return_value = self.database_backups.list()
self.mock_configuration_list.return_value = []
self.mock_instance_list.return_value = self.databases.list()
# Mock datastores
self.mock_datastore_list.return_value = self.datastores.list()
# Mock datastore versions
self.mock_datastore_version_list.return_value = (
self.datastore_versions.list())
self.mock_volume_type_list.return_value = []
self.mock_network_list.side_effect = [self.networks.list()[:1],
self.networks.list()[1:]]
self.mock_availability_zone_list.return_value = (
self.availability_zones.list())
res = self.client.get(LAUNCH_URL)
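# assert_mock_multiple_calls_with_same_arguments (a trove_dashboard test
# helper) verifies the mock was called exactly the given number of times,
# each time with the arguments of the supplied mock.call().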
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_check, 5, mock.call((), test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_flavors, 20,
mock.call(test.IsHttpRequest(),
test.IsA(str),
test.IsA(str)))
self.mock_backup_list.assert_called_once_with(test.IsHttpRequest())
self.mock_configuration_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_instance_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_version_list, 4,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.mock_volume_type_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(),
tenant_id=self.tenant.id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True)])
self.mock_availability_zone_list.assert_called_once_with(
test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/databases/launch.html')
# django 1.7 and later does not handle the thrown Http302
# exception well enough.
# TODO(mrunge): re-check when django-1.8 is stable
@unittest.skipIf(django.VERSION >= (1, 7, 0),
'Currently skipped with Django >= 1.7')
@test.create_mocks({api.trove: ('datastore_flavors',)})
def test_launch_instance_exception_on_flavors(self):
trove_exception = self.exceptions.nova
self.mock_datastore_flavors.side_effect = trove_exception
toSuppress = ["trove_dashboard.content.databases."
"workflows.create_instance",
"horizon.workflows.base"]
# Suppress expected log messages in the test output
loggers = []
for cls in toSuppress:
logger = logging.getLogger(cls)
loggers.append((logger, logger.getEffectiveLevel()))
logger.setLevel(logging.CRITICAL)
try:
with self.assertRaises(exceptions.Http302):
self.client.get(LAUNCH_URL)
self.mock_datastore_flavors.assert_called_once_with(
test.IsHttpRequest(), mock.ANY, mock.ANY)
finally:
# Restore the previous log levels
for (log, level) in loggers:
log.setLevel(level)
@test.create_mocks({
api.trove: ('backup_list', 'configuration_list', 'datastore_flavors',
'datastore_list', 'datastore_version_list', 'flavor_list',
'instance_create', 'instance_list'),
dash_api.cinder: ('volume_type_list',),
dash_api.neutron: ('network_list',),
dash_api.nova: ('availability_zone_list',),
policy: ('check',),
})
def test_create_simple_instance(self):
self.mock_check.return_value = True
self.mock_datastore_flavors.return_value = self.flavors.list()
self.mock_backup_list.return_value = self.database_backups.list()
self.mock_instance_list.return_value = self.databases.list()
# Mock datastores
self.mock_datastore_list.return_value = self.datastores.list()
# Mock datastore versions
self.mock_datastore_version_list.return_value = (
self.datastore_versions.list())
self.mock_volume_type_list.return_value = []
self.mock_network_list.side_effect = [self.networks.list()[:1],
self.networks.list()[1:]]
nics = [{"net-id": self.networks.first().id}]
datastore = 'mysql'
datastore_version = '5.5'
field_name = self._build_flavor_widget_name(datastore,
datastore_version)
self.mock_availability_zone_list.return_value = (
self.availability_zones.list())
# Mock create database call
self.mock_instance_create.return_value = self.databases.first()
post = {
'name': "MyDB",
'volume': '1',
'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'datastore': field_name,
field_name: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': self.networks.first().id,
'volume_type': 'no_type'
}
res = self.client.post(LAUNCH_URL, post)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_check, 5, mock.call((), test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_flavors, 20,
mock.call(test.IsHttpRequest(),
test.IsA(str),
test.IsA(str)))
self.mock_backup_list.assert_called_once_with(test.IsHttpRequest())
self.mock_instance_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_version_list, 4,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.mock_volume_type_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(),
tenant_id=self.tenant.id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True)])
self.mock_availability_zone_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_instance_create.assert_called_once_with(
test.IsHttpRequest(),
test.IsA(str),
test.IsA(int),
test.IsA(str),
databases=None,
datastore=datastore,
datastore_version=datastore_version,
restore_point=None,
replica_of=None,
configuration=None,
users=None,
nics=nics,
replica_count=None,
volume_type=None,
locality=None,
availability_zone=test.IsA(str),
access=None)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('backup_list', 'configuration_list', 'datastore_flavors',
'datastore_list', 'datastore_version_list', 'flavor_list',
'instance_create', 'instance_list'),
dash_api.cinder: ('volume_type_list',),
dash_api.neutron: ('network_list',),
dash_api.nova: ('availability_zone_list',),
policy: ('check',),
})
def test_create_simple_instance_exception(self):
self.mock_check.return_value = True
trove_exception = self.exceptions.nova
self.mock_datastore_flavors.return_value = self.flavors.list()
self.mock_backup_list.return_value = self.database_backups.list()
self.mock_instance_list.return_value = self.databases.list()
# Mock datastores
self.mock_datastore_list.return_value = self.datastores.list()
# Mock datastore versions
self.mock_datastore_version_list.return_value = (
self.datastore_versions.list())
self.mock_volume_type_list.return_value = []
self.mock_network_list.side_effect = [self.networks.list()[:1],
self.networks.list()[1:]]
nics = [{"net-id": self.networks.first().id}]
datastore = 'mysql'
datastore_version = '5.5'
field_name = self._build_flavor_widget_name(datastore,
datastore_version)
self.mock_availability_zone_list.return_value = (
self.availability_zones.list())
# Mock create database call
self.mock_instance_create.side_effect = trove_exception
post = {
'name': "MyDB",
'volume': '1',
'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'datastore': field_name,
field_name: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': self.networks.first().id,
'volume_type': 'no_type'
}
res = self.client.post(LAUNCH_URL, post)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_check, 5, mock.call((), test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_flavors, 20,
mock.call(test.IsHttpRequest(),
test.IsA(str),
test.IsA(str)))
self.mock_backup_list.assert_called_once_with(test.IsHttpRequest())
self.mock_instance_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_version_list, 4,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.mock_volume_type_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(),
tenant_id=self.tenant.id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True)])
self.mock_availability_zone_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_instance_create.assert_called_once_with(
test.IsHttpRequest(),
test.IsA(str),
test.IsA(int),
test.IsA(str),
databases=None,
datastore=datastore,
datastore_version=datastore_version,
restore_point=None,
replica_of=None,
configuration=None,
users=None,
nics=nics,
replica_count=None,
volume_type=None,
locality=None,
availability_zone=test.IsA(str),
access=None)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('instance_get', 'flavor_get', 'root_show')
})
def _test_details(self, database, test_text, assert_contains=True):
self.mock_instance_get.return_value = database
self.mock_flavor_get.return_value = self.flavors.first()
self.mock_root_show.return_value = self.database_user_roots.first()
# Suppress expected log messages in the test output
loggers = []
toSuppress = ["trove_dashboard.content.databases.tabs",
"horizon.tables"]
for cls in toSuppress:
logger = logging.getLogger(cls)
loggers.append((logger, logger.getEffectiveLevel()))
logger.setLevel(logging.CRITICAL)
try:
res = self.client.get(DETAILS_URL)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_flavor_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_root_show.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.assertTemplateUsed(res, 'project/databases/'
'_detail_overview.html')
if assert_contains:
self.assertContains(res, test_text)
else:
self.assertNotContains(res, test_text)
finally:
# Restore the previous log levels
for (log, level) in loggers:
log.setLevel(level)
def test_details_with_ip(self):
database = self.databases.first()
self._test_details(database, database.ip[0])
def test_details_with_hostname(self):
database = self.databases.list()[1]
self._test_details(database, database.hostname)
def test_details_without_locality(self):
database = self.databases.list()[1]
self._test_details(database, "Locality", assert_contains=False)
def test_details_with_locality(self):
database = self.databases.first()
self._test_details(database, "Locality")
def test_create_database(self):
database = self.databases.first()
url = reverse('horizon:project:databases:create_database',
args=[database.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/databases/create_database.html')
@test.create_mocks({api.trove: ('database_create',)})
def test_create_new_database(self):
new_database = {
"status": "ACTIVE",
"updated": "2013-08-12T22:00:09",
"name": "NewDB",
"links": [],
"created": "2013-08-12T22:00:03",
"ip": [
"10.0.0.3",
],
"volume": {
"used": 0.13,
"size": 1,
},
"flavor": {
"id": "1",
"links": [],
},
"datastore": {
"type": "mysql",
"version": "5.5"
},
"id": "12345678-73db-4e23-b52e-368937d72719",
}
self.mock_database_create.return_value = new_database
url = reverse('horizon:project:databases:create_database',
args=['id'])
post = {
'method': 'CreateDatabaseForm',
'instance_id': 'id',
'name': 'NewDB'}
res = self.client.post(url, post)
self.mock_database_create.assert_called_once_with(
test.IsHttpRequest(), u'id', u'NewDB', character_set=u'',
collation=u'')
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_mocks({api.trove: ('database_create',)})
def test_create_new_database_exception(self):
self.mock_database_create.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:create_database',
args=['id'])
post = {
'method': 'CreateDatabaseForm',
'instance_id': 'id',
'name': 'NewDB'}
res = self.client.post(url, post)
self.mock_database_create.assert_called_once_with(
test.IsHttpRequest(), u'id', u'NewDB', character_set=u'',
collation=u'')
self.assertEqual(res.status_code, 302)
@test.create_mocks({api.trove: ('instance_get', 'root_show')})
def test_show_root(self):
database = self.databases.first()
database.id = u'id'
user = self.database_user_roots.first()
self.mock_instance_get.return_value = database
self.mock_root_show.return_value = user
url = reverse('horizon:project:databases:manage_root',
args=['id'])
res = self.client.get(url)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_root_show, 2,
mock.call(test.IsHttpRequest(), database.id))
self.assertTemplateUsed(
res, 'project/databases/manage_root.html')
@test.create_mocks({api.trove: ('instance_get', 'root_show')})
def test_show_root_exception(self):
database = self.databases.first()
self.mock_instance_get.return_value = database
self.mock_root_show.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:manage_root',
args=['id'])
res = self.client.get(url)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_root_show.assert_called_once_with(
test.IsHttpRequest(), u'id')
self.assertRedirectsNoFollow(res, DETAILS_URL)
@test.create_mocks({api.trove: ('root_enable',)})
def test_enable_root(self):
self.mock_root_enable.return_value = ("root", "password")
url = reverse('horizon:project:databases:manage_root',
args=['id'])
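# Horizon table actions are posted as '<table>__<action>__<row id>' in the
# 'action' form field; here the row id is the literal instance id 'id'.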
form_data = {"action": "manage_root__enable_root_action__%s" % 'id'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id'}
enable_root_info_list = []
enable_root_info = views.EnableRootInfo('id', 'inst1', False, '')
enable_root_info_list.append(enable_root_info)
table = tables.ManageRootTable(req, enable_root_info_list, **kwargs)
table.maybe_handle()
self.mock_root_enable.assert_called_once_with(
test.IsHttpRequest(), [u'id'])
self.assertEqual(table.data[0].enabled, True)
self.assertEqual(table.data[0].password, "password")
@test.create_mocks({api.trove: ('root_enable',)})
def test_enable_root_exception(self):
self.mock_root_enable.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:manage_root',
args=['id'])
form_data = {"action": "manage_root__enable_root_action__%s" % 'id'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id'}
enable_root_info_list = []
enable_root_info = views.EnableRootInfo('id', 'inst1', False, '')
enable_root_info_list.append(enable_root_info)
table = tables.ManageRootTable(req, enable_root_info_list, **kwargs)
table.maybe_handle()
self.mock_root_enable.assert_called_once_with(
test.IsHttpRequest(), [u'id'])
self.assertNotEqual(table.data[0].enabled, True)
self.assertNotEqual(table.data[0].password, "password")
@test.create_mocks({api.trove: ('root_disable',)})
def test_disable_root(self):
url = reverse('horizon:project:databases:manage_root',
args=['id'])
form_data = {"action": "manage_root__disable_root_action__%s" % 'id'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id'}
enable_root_info_list = []
enable_root_info = views.EnableRootInfo(
'id', 'inst1', True, 'password')
enable_root_info_list.append(enable_root_info)
table = tables.ManageRootTable(req, enable_root_info_list, **kwargs)
table.maybe_handle()
self.mock_root_disable.assert_called_once_with(
test.IsHttpRequest(), u'id')
self.assertEqual(table.data[0].enabled, True)
self.assertIsNone(table.data[0].password)
@test.create_mocks({api.trove: ('root_disable',)})
def test_disable_root_exception(self):
self.mock_root_disable.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:manage_root',
args=['id'])
form_data = {"action": "manage_root__disable_root_action__%s" % 'id'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id'}
enable_root_info_list = []
enable_root_info = views.EnableRootInfo(
'id', 'inst1', True, 'password')
enable_root_info_list.append(enable_root_info)
table = tables.ManageRootTable(req, enable_root_info_list, **kwargs)
table.maybe_handle()
self.mock_root_disable.assert_called_once_with(
test.IsHttpRequest(), u'id')
self.assertEqual(table.data[0].enabled, True)
self.assertEqual(table.data[0].password, "password")
@test.create_mocks({
api.trove: ('instance_get', 'flavor_get', 'user_delete', 'users_list',
'user_list_access')
})
def test_user_delete(self):
database = self.databases.first()
user = self.database_users.first()
user_db = self.database_user_dbs.first()
database_id = database.id
# Instead of using the user's ID, the api uses the user's name. BOOO!
user_id = user.name + "@" + user.host
# views.py: DetailView.get_data
self.mock_instance_get.return_value = database
self.mock_flavor_get.return_value = self.flavors.first()
# tabs.py: UserTab.get_user_data
self.mock_users_list.return_value = [user]
self.mock_user_list_access.return_value = [user_db]
# tables.py: DeleteUser.delete
self.mock_user_delete.return_value = None
details_url = reverse('horizon:project:databases:detail',
args=[database_id])
url = details_url + '?tab=instance_details__users_tab'
action_string = u"users__delete__%s" % user_id
form_data = {'action': action_string}
res = self.client.post(url, form_data)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_flavor_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_users_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_user_list_access.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str), test.IsA(str),
host=test.IsA(str))
self.mock_user_delete.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str),
test.IsA(str), host=test.IsA(str))
self.assertRedirectsNoFollow(res, url)
def test_create_user(self):
user = self.users.first()
url = reverse('horizon:project:databases:create_user',
args=[user.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/databases/create_user.html')
@test.create_mocks({api.trove: ('user_create',)})
def test_create_new_user(self):
database = self.databases.first()
user = self.users.first()
new_user = {
"name": "Test_User2",
"host": "%",
"databases": ["TestDB"],
}
self.mock_user_create.return_value = new_user
url = reverse('horizon:project:databases:create_user',
args=[database.id])
post = {
'method': 'CreateUserForm',
'instance_id': database.id,
'name': user.name,
'password': 'password'}
res = self.client.post(url, post)
self.mock_user_create.assert_called_once_with(
test.IsHttpRequest(), database.id, user.name, u'password',
host=u'', databases=[])
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_mocks({api.trove: ('user_create',)})
def test_create_new_user_exception(self):
self.mock_user_create.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:create_user',
args=['id'])
post = {
'method': 'CreateUserForm',
'instance_id': 'id',
'name': 'name',
'password': 'password'}
res = self.client.post(url, post)
self.mock_user_create.assert_called_once_with(
test.IsHttpRequest(), u'id', u'name', u'password',
host=u'', databases=[])
self.assertEqual(res.status_code, 302)
@test.create_mocks({api.trove: ('user_update_attributes',)})
def test_edit_user(self):
database = self.databases.first()
user = self.users.first()
url = reverse('horizon:project:databases:edit_user',
args=[database.id, user.name, '%'])
post = {
'method': 'EditUserForm',
'instance_id': database.id,
'user_name': user.name,
'user_host': '%',
'new_name': 'new_name',
'new_password': 'new_password',
'new_host': '127.0.0.1'}
res = self.client.post(url, post)
self.mock_user_update_attributes.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str),
test.IsA(str), host=test.IsA(str),
new_name=test.IsA(str),
new_password=test.IsA(str),
new_host=test.IsA(str))
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_mocks({api.trove: ('user_update_attributes',)})
def test_edit_user_exception(self):
database = self.databases.first()
user = self.users.first()
url = reverse('horizon:project:databases:edit_user',
args=[database.id, user.name, '%'])
post = {
'method': 'EditUserForm',
'instance_id': database.id,
'user_name': user.name,
'new_name': 'new_name',
'user_host': '%',
'new_password': 'new_password',
'new_host': '127.0.0.1'}
res = self.client.post(url, post)
self.mock_user_update_attributes.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str),
test.IsA(str), host=test.IsA(str),
new_name=test.IsA(str),
new_password=test.IsA(str),
new_host=test.IsA(str))
self.assertEqual(res.status_code, 302)
def test_edit_user_no_values(self):
database = self.databases.first()
user = self.users.first()
url = reverse('horizon:project:databases:edit_user',
args=[database.id, user.name, '%'])
post = {
'method': 'EditUserForm',
'instance_id': database.id,
'user_name': user.name,
'user_host': '%'}
res = self.client.post(url, post)
msg = forms.EditUserForm.validation_error_message
self.assertFormError(res, "form", None, [msg])
@test.create_mocks({api.trove: ('database_list', 'user_show_access')})
def test_access_detail_get(self):
self.mock_database_list.return_value = self.databases.list()
self.mock_user_show_access.return_value = self.databases.list()
url = reverse('horizon:project:databases:access_detail',
args=['id', 'name', 'host'])
res = self.client.get(url)
self.mock_database_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_user_show_access.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str),
test.IsA(str), host=test.IsA(str))
self.assertTemplateUsed(
res, 'project/databases/access_detail.html')
@test.create_mocks({api.trove: ('database_list', 'user_show_access')})
def test_access_detail_get_exception(self):
self.mock_database_list.return_value = self.databases.list()
self.mock_user_show_access.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:access_detail',
args=['id', 'name', 'host'])
res = self.client.get(url)
self.mock_database_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_user_show_access.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str),
test.IsA(str), host=test.IsA(str))
self.assertRedirectsNoFollow(res, DETAILS_URL)
@test.create_mocks({api.trove: ('user_grant_access',)})
def test_detail_grant_access(self):
url = reverse('horizon:project:databases:access_detail',
args=['id', 'name', 'host'])
form_data = {"action": "access__grant_access__%s" % 'db1'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id', 'user_name': 'name', 'user_host': '%'}
db_access_list = []
db_access = views.DBAccess('db1', False)
db_access_list.append(db_access)
table = tables.AccessTable(req, db_access_list, **kwargs)
handled = table.maybe_handle()
self.mock_user_grant_access.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str), test.IsA(str),
[test.IsA(str)], host=test.IsA(str))
handled_url = handled['location']
self.assertEqual(handled_url, url)
@test.create_mocks({api.trove: ('user_grant_access',)})
def test_detail_grant_access_exception(self):
self.mock_user_grant_access.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:access_detail',
args=['id', 'name', 'host'])
form_data = {"action": "access__grant_access__%s" % 'db1'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id', 'user_name': 'name', 'user_host': '%'}
db_access_list = []
db_access = views.DBAccess('db1', False)
db_access_list.append(db_access)
table = tables.AccessTable(req, db_access_list, **kwargs)
handled = table.maybe_handle()
self.mock_user_grant_access.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str), test.IsA(str),
[test.IsA(str)], host=test.IsA(str))
handled_url = handled['location']
self.assertEqual(handled_url, url)
@test.create_mocks({api.trove: ('user_revoke_access',)})
def test_detail_revoke_access(self):
url = reverse('horizon:project:databases:access_detail',
args=['id', 'name', 'host'])
form_data = {"action": "access__revoke_access__%s" % 'db1'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id', 'user_name': 'name', 'user_host': '%'}
db_access_list = []
db_access = views.DBAccess('db1', True)
db_access_list.append(db_access)
table = tables.AccessTable(req, db_access_list, **kwargs)
handled = table.maybe_handle()
self.mock_user_revoke_access.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str), test.IsA(str),
test.IsA(str), host=test.IsA(str))
handled_url = handled['location']
self.assertEqual(handled_url, url)
@test.create_mocks({api.trove: ('user_revoke_access',)})
def test_detail_revoke_access_exception(self):
self.mock_user_revoke_access.side_effect = self.exceptions.trove
url = reverse('horizon:project:databases:access_detail',
args=['id', 'name', 'host'])
form_data = {"action": "access__revoke_access__%s" % 'db1'}
req = self.factory.post(url, form_data)
kwargs = {'instance_id': 'id', 'user_name': 'name', 'user_host': '%'}
db_access_list = []
db_access = views.DBAccess('db1', True)
db_access_list.append(db_access)
table = tables.AccessTable(req, db_access_list, **kwargs)
handled = table.maybe_handle()
self.mock_user_revoke_access.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str), test.IsA(str),
test.IsA(str), host=test.IsA(str))
handled_url = handled['location']
self.assertEqual(handled_url, url)
@test.create_mocks({
api.trove: ('instance_get', 'instance_resize_volume')})
def test_resize_volume(self):
database = self.databases.first()
database_id = database.id
database_size = database.volume.get('size')
# views.py: DetailView.get_data
self.mock_instance_get.return_value = database
# forms.py: ResizeVolumeForm.handle
self.mock_instance_resize_volume.return_value = None
url = reverse('horizon:project:databases:resize_volume',
args=[database_id])
post = {
'instance_id': database_id,
'orig_size': database_size,
'new_size': database_size + 1,
}
res = self.client.post(url, post)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_instance_resize_volume.assert_called_once_with(
test.IsHttpRequest(), database_id, test.IsA(int))
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({api.trove: ('instance_get', )})
def test_resize_volume_bad_value(self):
database = self.databases.first()
database_id = database.id
database_size = database.volume.get('size')
# views.py: DetailView.get_data
self.mock_instance_get.return_value = database
url = reverse('horizon:project:databases:resize_volume',
args=[database_id])
post = {
'instance_id': database_id,
'orig_size': database_size,
'new_size': database_size,
}
res = self.client.post(url, post)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.assertContains(
res, "New size for volume must be greater than current size.")
@test.create_mocks(
{api.trove: ('instance_get',
'flavor_list')})
def test_resize_instance_get(self):
database = self.databases.first()
# views.py: DetailView.get_data
self.mock_instance_get.return_value = database
self.mock_flavor_list.return_value = self.database_flavors.list()
url = reverse('horizon:project:databases:resize_instance',
args=[database.id])
res = self.client.get(url)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), database.id)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/databases/resize_instance.html')
option = '<option value="%s">%s</option>'
for flavor in self.database_flavors.list():
if flavor.id == database.flavor['id']:
self.assertNotContains(res, option % (flavor.id, flavor.name))
else:
self.assertContains(res, option % (flavor.id, flavor.name))
@test.create_mocks(
{api.trove: ('instance_get',
'flavor_list',
'instance_resize')})
def test_resize_instance(self):
database = self.databases.first()
# views.py: DetailView.get_data
self.mock_instance_get.return_value = database
self.mock_flavor_list.return_value = self.database_flavors.list()
old_flavor = self.database_flavors.list()[0]
new_flavor = self.database_flavors.list()[1]
self.mock_instance_resize.return_value = None
url = reverse('horizon:project:databases:resize_instance',
args=[database.id])
post = {
'instance_id': database.id,
'old_flavor_name': old_flavor.name,
'old_flavor_id': old_flavor.id,
'new_flavor': new_flavor.id
}
res = self.client.post(url, post)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), database.id)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.mock_instance_resize.assert_called_once_with(
test.IsHttpRequest(), database.id, new_flavor.id)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('backup_list', 'configuration_list', 'datastore_flavors',
'datastore_list', 'datastore_version_list', 'flavor_list',
'instance_create', 'instance_get', 'instance_list_all'),
dash_api.cinder: ('volume_type_list',),
dash_api.neutron: ('network_list',),
dash_api.nova: ('availability_zone_list',),
policy: ('check',),
})
def test_create_replica_instance(self):
self.mock_check.return_value = True
self.mock_datastore_flavors.return_value = self.flavors.list()
self.mock_backup_list.return_value = self.database_backups.list()
self.mock_instance_list_all.return_value = self.databases.list()
self.mock_datastore_list.return_value = self.datastores.list()
self.mock_datastore_version_list.return_value = (
self.datastore_versions.list())
self.mock_volume_type_list.return_value = []
self.mock_network_list.side_effect = [self.networks.list()[:1],
self.networks.list()[1:]]
nics = [{"net-id": self.networks.first().id}]
self.mock_availability_zone_list.return_value = (
self.availability_zones.list())
self.mock_instance_get.return_value = self.databases.first()
datastore = 'mysql'
datastore_version = '5.5'
field_name = self._build_flavor_widget_name(datastore,
datastore_version)
# Mock create database call
self.mock_instance_create.return_value = self.databases.first()
post = {
'name': "MyDB",
'volume': '1',
'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'datastore': field_name,
field_name: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': self.networks.first().id,
'initial_state': 'master',
'master': self.databases.first().id,
'replica_count': 2,
'volume_type': 'no_type'
}
res = self.client.post(LAUNCH_URL, post)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_check, 5, mock.call((), test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_flavors, 20,
mock.call(test.IsHttpRequest(),
test.IsA(str),
test.IsA(str)))
self.mock_backup_list.assert_called_once_with(test.IsHttpRequest())
self.mock_instance_list_all.assert_called_once_with(
test.IsHttpRequest())
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_datastore_version_list, 4,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.mock_volume_type_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(),
tenant_id=self.tenant.id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True)])
self.mock_availability_zone_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_instance_create.assert_called_once_with(
test.IsHttpRequest(),
test.IsA(str),
test.IsA(int),
test.IsA(str),
databases=None,
datastore=datastore,
datastore_version=datastore_version,
restore_point=None,
replica_of=self.databases.first().id,
configuration=None,
users=None,
nics=nics,
replica_count=2,
volume_type=None,
locality=None,
availability_zone=test.IsA(str),
access=None)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('promote_to_replica_source',),
views.PromoteToReplicaSourceView: ('get_initial',)})
def test_promote_replica_instance(self):
replica_source = self.databases.first()
replica = self.databases.list()[1]
initial = {'instance_id': replica_source.id,
'replica': replica,
'replica_source': replica_source}
self.mock_get_initial.return_value = initial
url = reverse('horizon:project:databases:promote_to_replica_source',
args=[replica_source.id])
form = {
'instance_id': replica_source.id
}
res = self.client.post(url, form)
self.mock_get_initial.assert_called_once()
self.mock_promote_to_replica_source.assert_called_once_with(
test.IsHttpRequest(), replica_source.id)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('promote_to_replica_source',),
views.PromoteToReplicaSourceView: ('get_initial',)})
def test_promote_replica_instance_exception(self):
replica_source = self.databases.first()
replica = self.databases.list()[1]
initial = {'instance_id': replica_source.id,
'replica': replica,
'replica_source': replica_source}
self.mock_get_initial.return_value = initial
self.mock_promote_to_replica_source.side_effect = (
self.exceptions.trove)
url = reverse('horizon:project:databases:promote_to_replica_source',
args=[replica_source.id])
form = {
'instance_id': replica_source.id
}
res = self.client.post(url, form)
self.mock_get_initial.assert_called_once()
self.mock_promote_to_replica_source.assert_called_once_with(
test.IsHttpRequest(), replica_source.id)
self.assertEqual(res.status_code, 302)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('flavor_list', 'instance_list',
'eject_replica_source',),
})
def test_eject_replica_source(self):
databases = common.Paginated(self.databases.list())
database = databases[2]
databases = common.Paginated(self.databases.list())
self.mock_instance_list.return_value = databases
self.mock_flavor_list.return_value = self.flavors.list()
res = self.client.post(
INDEX_URL,
{'action': 'databases__eject_replica_source__%s' % database.id})
self.mock_eject_replica_source.assert_called_once_with(
test.IsHttpRequest(), database.id)
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('flavor_list', 'instance_list',
'eject_replica_source',),
})
def test_eject_replica_source_exception(self):
databases = common.Paginated(self.databases.list())
database = databases[2]
self.mock_eject_replica_source.side_effect = self.exceptions.trove
databases = common.Paginated(self.databases.list())
self.mock_instance_list.return_value = databases
self.mock_flavor_list.return_value = self.flavors.list()
res = self.client.post(
INDEX_URL,
{'action': 'databases__eject_replica_source__%s' % database.id})
self.mock_eject_replica_source.assert_called_once_with(
test.IsHttpRequest(), database.id)
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('instance_list',)
})
def test_master_list_pagination(self):
request = http.HttpRequest()
first_part = common.Paginated(items=self.databases.list()[:1],
next_marker='marker')
second_part = common.Paginated(items=self.databases.list()[1:])
self.mock_instance_list.side_effect = [
first_part, second_part, first_part]
advanced_page = create_instance.AdvancedAction(request, None)
choices = advanced_page.populate_master_choices(request, None)
expected_calls = [
mock.call(request),
mock.call(request, marker='marker'),
mock.call(request)]
self.assertEqual(expected_calls,
self.mock_instance_list.call_args_list)
self.assertEqual(len(choices), len(self.databases.list()) + 1)
def _build_datastore_display_text(self, datastore, datastore_version):
return datastore + ' - ' + datastore_version
def _build_flavor_widget_name(self, datastore, datastore_version):
return common_utils.hexlify(self._build_datastore_display_text(
datastore, datastore_version))
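# Note: the widget name is simply the hex-encoded display text, so for the
# 'mysql'/'5.5' pair used in these tests the flavor form field is
# common_utils.hexlify('mysql - 5.5'); the create tests post the flavor id
# under that key and also pass the same name as the 'datastore' value.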
@test.create_mocks({
api.trove: ('instance_get',
'configuration_list',
'instance_attach_configuration'),
})
def test_attach_configuration(self):
database = self.databases.first()
configuration = self.database_configurations.first()
self.mock_instance_get.return_value = database
self.mock_configuration_list.return_value = (
self.database_configurations.list())
self.mock_instance_attach_configuration.return_value = None
url = reverse('horizon:project:databases:attach_config',
args=[database.id])
form = {
'instance_id': database.id,
'configuration': configuration.id,
}
res = self.client.post(url, form)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_configuration_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_instance_attach_configuration.assert_called_once_with(
test.IsHttpRequest(), database.id, configuration.id)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('instance_get',
'configuration_list',
'instance_attach_configuration'),
})
def test_attach_configuration_exception(self):
database = self.databases.first()
configuration = self.database_configurations.first()
self.mock_instance_get.return_value = database
self.mock_configuration_list.return_value = (
self.database_configurations.list())
self.mock_instance_attach_configuration.side_effect = (
self.exceptions.trove)
url = reverse('horizon:project:databases:attach_config',
args=[database.id])
form = {
'instance_id': database.id,
'configuration': configuration.id,
}
res = self.client.post(url, form)
self.mock_instance_get.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_configuration_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_instance_attach_configuration.assert_called_once_with(
test.IsHttpRequest(), database.id, configuration.id)
self.assertEqual(res.status_code, 302)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('instance_list',
'flavor_list',
'instance_detach_configuration',),
})
def test_detach_configuration(self):
databases = common.Paginated(self.databases.list())
database = databases[2]
self.mock_instance_list.return_value = databases
self.mock_flavor_list.return_value = self.flavors.list()
self.mock_instance_detach_configuration.return_value = None
res = self.client.post(
INDEX_URL,
{'action': 'databases__detach_configuration__%s' % database.id})
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.mock_instance_detach_configuration.assert_called_once_with(
test.IsHttpRequest(), database.id)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({
api.trove: ('instance_list',
'flavor_list',
'instance_detach_configuration',),
})
def test_detach_configuration_exception(self):
databases = common.Paginated(self.databases.list())
database = databases[2]
self.mock_instance_list.return_value = databases
self.mock_flavor_list.return_value = self.flavors.list()
self.mock_instance_detach_configuration.side_effect = (
self.exceptions.trove)
res = self.client.post(
INDEX_URL,
{'action': 'databases__detach_configuration__%s' % database.id})
self.mock_instance_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.mock_instance_detach_configuration.assert_called_once_with(
test.IsHttpRequest(), database.id)
self.assertRedirectsNoFollow(res, INDEX_URL)
|
|
import tables
import os, os.path
from time import time
import random
import numarray
from numarray import random_array
from numarray import records
# in order to always generate the same random sequence
random.seed(19)
random_array.seed(19, 20)
def open_db(filename, remove=0):
if remove and os.path.exists(filename):
os.remove(filename)
con = tables.openFile(filename, 'a')
return con
def create_db(filename, nrows):
class Record(tables.IsDescription):
col1 = tables.Int32Col()
col2 = tables.Int32Col()
col3 = tables.Float64Col()
col4 = tables.Float64Col()
con = open_db(filename, remove=1)
table = con.createTable(con.root, 'table', Record,
filters=filters, expectedrows=nrows)
table.indexFilters = filters
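    # rows are appended in chunks of "step" rows (100,000 at a time) rather than all at once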
step = 1000*100
scale = 0.1
t1=time()
j = 0
for i in xrange(0, nrows, step):
stop = (j+1)*step
if stop > nrows:
stop = nrows
arr_f8 = numarray.arange(i, stop, type=numarray.Float64)
arr_i4 = numarray.arange(i, stop, type=numarray.Int32)
if userandom:
arr_f8 += random_array.normal(0, stop*scale, shape=[stop-i])
arr_i4 = numarray.array(arr_f8, type=numarray.Int32)
recarr = records.fromarrays([arr_i4, arr_i4, arr_f8, arr_f8])
table.append(recarr)
j += 1
table.flush()
ctime = time()-t1
if verbose:
print "insert time:", round(ctime, 5)
print "Krows/s:", round((nrows/1000.)/ctime, 5)
index_db(table)
close_db(con)
def index_db(table):
t1=time()
table.cols.col2.createIndex()
itime = time()-t1
if verbose:
print "index time (int):", round(itime, 5)
print "Krows/s:", round((nrows/1000.)/itime, 5)
t1=time()
table.cols.col4.createIndex()
itime = time()-t1
if verbose:
print "index time (float):", round(itime, 5)
print "Krows/s:", round((nrows/1000.)/itime, 5)
def query_db(filename, rng):
con = open_db(filename)
table = con.root.table
# Query for integer columns
# Query for non-indexed column
if not doqueryidx:
t1=time()
ntimes = 10
for i in range(ntimes):
results = [ r['col1'] for r in
table.where(rng[0]+i <= table.cols.col1 <= rng[1]+i) ]
qtime = (time()-t1)/ntimes
if verbose:
print "query time (int, not indexed):", round(qtime, 5)
print "Mrows/s:", round((nrows/1000.)/qtime, 5)
print results
# Query for indexed column
t1=time()
ntimes = 10
for i in range(ntimes):
results = [ r['col1'] for r in
table.where(rng[0]+i <= table.cols.col2 <= rng[1]+i) ]
qtime = (time()-t1)/ntimes
if verbose:
print "query time (int, indexed):", round(qtime, 5)
print "Mrows/s:", round((nrows/1000.)/qtime, 5)
print results
# Query for floating columns
# Query for non-indexed column
if not doqueryidx:
t1=time()
ntimes = 10
for i in range(ntimes):
results = [ r['col3'] for r in
table.where(rng[0]+i <= table.cols.col3 <= rng[1]+i) ]
qtime = (time()-t1)/ntimes
if verbose:
print "query time (float, not indexed):", round(qtime, 5)
print "Mrows/s:", round((nrows/1000.)/qtime, 5)
print results
# Query for indexed column
t1=time()
ntimes = 10
for i in range(ntimes):
results = [ r['col3'] for r in
table.where(rng[0]+i <= table.cols.col4 <= rng[1]+i) ]
qtime = (time()-t1)/ntimes
if verbose:
print "query time (float, indexed):", round(qtime, 5)
print "Mrows/s:", round((nrows/1000.)/qtime, 5)
print results
close_db(con)
def close_db(con):
con.close()
if __name__=="__main__":
import sys
import getopt
try:
import psyco
psyco_imported = 1
except:
psyco_imported = 0
usage = """usage: %s [-v] [-p] [-m] [-c] [-q] [-i] [-z complevel] [-l complib] [-R range] [-n nrows] file
-v verbose
-p use "psyco" if available
-m use random values to fill the table
-q do a query (both indexed and non-indexed version)
-i do a query (exclude non-indexed version)
-c create the database
            -z set the compression level (no compression by default)
-l use complib for compression (zlib used by default)
-R select a range in a field in the form "start,stop" (def "0,10")
-n sets the number of rows (in krows) in each table
\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'vpmcqiz:l:R:n:')
except:
sys.stderr.write(usage)
sys.exit(0)
# default options
verbose = 0
usepsyco = 0
userandom = 0
docreate = 0
docompress = 0
complib = "zlib"
doquery = 0
doqueryidx = 0
rng = [0, 10]
nrows = 1
# Get the options
for option in opts:
if option[0] == '-v':
verbose = 1
elif option[0] == '-p':
usepsyco = 1
elif option[0] == '-m':
userandom = 1
elif option[0] == '-c':
docreate = 1
createindex = 1
elif option[0] == '-q':
doquery = 1
elif option[0] == '-i':
doqueryidx = 1
elif option[0] == '-z':
docompress = int(option[1])
elif option[0] == '-l':
complib = option[1]
elif option[0] == '-R':
rng = [int(i) for i in option[1].split(",")]
elif option[0] == '-n':
nrows = int(option[1])
# Catch the hdf5 file passed as the last argument
filename = pargs[0]
# The filters chosen
filters = tables.Filters(complevel=docompress, complib=complib)
if verbose:
print "pytables version:", tables.__version__
if userandom:
print "using random values"
if doqueryidx:
print "doing indexed queries only"
if docreate:
if verbose:
print "writing %s krows" % nrows
if psyco_imported and usepsyco:
psyco.bind(create_db)
nrows *= 1000
create_db(filename, nrows)
if doquery:
query_db(filename, rng)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for user32.dll in ctypes.
"""
__revision__ = "$Id: user32.py 1299 2013-12-20 09:30:55Z qvasimodo $"
from defines import *
from version import bits
from kernel32 import GetLastError, SetLastError
from gdi32 import POINT, PPOINT, LPPOINT, RECT, PRECT, LPRECT
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- Helpers ------------------------------------------------------------------
def MAKE_WPARAM(wParam):
"""
Convert arguments to the WPARAM type.
Used automatically by SendMessage, PostMessage, etc.
You shouldn't need to call this function.
"""
wParam = ctypes.cast(wParam, LPVOID).value
if wParam is None:
wParam = 0
return wParam
def MAKE_LPARAM(lParam):
"""
Convert arguments to the LPARAM type.
Used automatically by SendMessage, PostMessage, etc.
You shouldn't need to call this function.
"""
return ctypes.cast(lParam, LPARAM)
class __WindowEnumerator (object):
"""
Window enumerator class. Used internally by the window enumeration APIs.
"""
def __init__(self):
self.hwnd = list()
def __call__(self, hwnd, lParam):
## print hwnd # XXX DEBUG
self.hwnd.append(hwnd)
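        # returning TRUE tells the window enumeration API to keep calling us for the next window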
return TRUE
#--- Types --------------------------------------------------------------------
WNDENUMPROC = WINFUNCTYPE(BOOL, HWND, PVOID)
#--- Constants ----------------------------------------------------------------
HWND_DESKTOP = 0
HWND_TOP = 0
HWND_BOTTOM = 1
HWND_TOPMOST = -1
HWND_NOTOPMOST = -2
HWND_MESSAGE = -3
# GetWindowLong / SetWindowLong
GWL_WNDPROC = -4
GWL_HINSTANCE = -6
GWL_HWNDPARENT = -8
GWL_ID = -12
GWL_STYLE = -16
GWL_EXSTYLE = -20
GWL_USERDATA = -21
# GetWindowLongPtr / SetWindowLongPtr
GWLP_WNDPROC = GWL_WNDPROC
GWLP_HINSTANCE = GWL_HINSTANCE
GWLP_HWNDPARENT = GWL_HWNDPARENT
GWLP_STYLE = GWL_STYLE
GWLP_EXSTYLE = GWL_EXSTYLE
GWLP_USERDATA = GWL_USERDATA
GWLP_ID = GWL_ID
# ShowWindow
SW_HIDE = 0
SW_SHOWNORMAL = 1
SW_NORMAL = 1
SW_SHOWMINIMIZED = 2
SW_SHOWMAXIMIZED = 3
SW_MAXIMIZE = 3
SW_SHOWNOACTIVATE = 4
SW_SHOW = 5
SW_MINIMIZE = 6
SW_SHOWMINNOACTIVE = 7
SW_SHOWNA = 8
SW_RESTORE = 9
SW_SHOWDEFAULT = 10
SW_FORCEMINIMIZE = 11
# SendMessageTimeout flags
SMTO_NORMAL = 0
SMTO_BLOCK = 1
SMTO_ABORTIFHUNG = 2
SMTO_NOTIMEOUTIFNOTHUNG = 8
SMTO_ERRORONEXIT = 0x20
# WINDOWPLACEMENT flags
WPF_SETMINPOSITION = 1
WPF_RESTORETOMAXIMIZED = 2
WPF_ASYNCWINDOWPLACEMENT = 4
# GetAncestor flags
GA_PARENT = 1
GA_ROOT = 2
GA_ROOTOWNER = 3
# GetWindow flags
GW_HWNDFIRST = 0
GW_HWNDLAST = 1
GW_HWNDNEXT = 2
GW_HWNDPREV = 3
GW_OWNER = 4
GW_CHILD = 5
GW_ENABLEDPOPUP = 6
#--- Window messages ----------------------------------------------------------
WM_USER = 0x400
WM_APP = 0x800
WM_NULL = 0
WM_CREATE = 1
WM_DESTROY = 2
WM_MOVE = 3
WM_SIZE = 5
WM_ACTIVATE = 6
WA_INACTIVE = 0
WA_ACTIVE = 1
WA_CLICKACTIVE = 2
WM_SETFOCUS = 7
WM_KILLFOCUS = 8
WM_ENABLE = 0x0A
WM_SETREDRAW = 0x0B
WM_SETTEXT = 0x0C
WM_GETTEXT = 0x0D
WM_GETTEXTLENGTH = 0x0E
WM_PAINT = 0x0F
WM_CLOSE = 0x10
WM_QUERYENDSESSION = 0x11
WM_QUIT = 0x12
WM_QUERYOPEN = 0x13
WM_ERASEBKGND = 0x14
WM_SYSCOLORCHANGE = 0x15
WM_ENDSESSION = 0x16
WM_SHOWWINDOW = 0x18
WM_WININICHANGE = 0x1A
WM_SETTINGCHANGE = WM_WININICHANGE
WM_DEVMODECHANGE = 0x1B
WM_ACTIVATEAPP = 0x1C
WM_FONTCHANGE = 0x1D
WM_TIMECHANGE = 0x1E
WM_CANCELMODE = 0x1F
WM_SETCURSOR = 0x20
WM_MOUSEACTIVATE = 0x21
WM_CHILDACTIVATE = 0x22
WM_QUEUESYNC = 0x23
WM_GETMINMAXINFO = 0x24
WM_PAINTICON = 0x26
WM_ICONERASEBKGND = 0x27
WM_NEXTDLGCTL = 0x28
WM_SPOOLERSTATUS = 0x2A
WM_DRAWITEM = 0x2B
WM_MEASUREITEM = 0x2C
WM_DELETEITEM = 0x2D
WM_VKEYTOITEM = 0x2E
WM_CHARTOITEM = 0x2F
WM_SETFONT = 0x30
WM_GETFONT = 0x31
WM_SETHOTKEY = 0x32
WM_GETHOTKEY = 0x33
WM_QUERYDRAGICON = 0x37
WM_COMPAREITEM = 0x39
WM_GETOBJECT = 0x3D
WM_COMPACTING = 0x41
WM_OTHERWINDOWCREATED = 0x42
WM_OTHERWINDOWDESTROYED = 0x43
WM_COMMNOTIFY = 0x44
CN_RECEIVE = 0x1
CN_TRANSMIT = 0x2
CN_EVENT = 0x4
WM_WINDOWPOSCHANGING = 0x46
WM_WINDOWPOSCHANGED = 0x47
WM_POWER = 0x48
PWR_OK = 1
PWR_FAIL = -1
PWR_SUSPENDREQUEST = 1
PWR_SUSPENDRESUME = 2
PWR_CRITICALRESUME = 3
WM_COPYDATA = 0x4A
WM_CANCELJOURNAL = 0x4B
WM_NOTIFY = 0x4E
WM_INPUTLANGCHANGEREQUEST = 0x50
WM_INPUTLANGCHANGE = 0x51
WM_TCARD = 0x52
WM_HELP = 0x53
WM_USERCHANGED = 0x54
WM_NOTIFYFORMAT = 0x55
WM_CONTEXTMENU = 0x7B
WM_STYLECHANGING = 0x7C
WM_STYLECHANGED = 0x7D
WM_DISPLAYCHANGE = 0x7E
WM_GETICON = 0x7F
WM_SETICON = 0x80
WM_NCCREATE = 0x81
WM_NCDESTROY = 0x82
WM_NCCALCSIZE = 0x83
WM_NCHITTEST = 0x84
WM_NCPAINT = 0x85
WM_NCACTIVATE = 0x86
WM_GETDLGCODE = 0x87
WM_SYNCPAINT = 0x88
WM_NCMOUSEMOVE = 0x0A0
WM_NCLBUTTONDOWN = 0x0A1
WM_NCLBUTTONUP = 0x0A2
WM_NCLBUTTONDBLCLK = 0x0A3
WM_NCRBUTTONDOWN = 0x0A4
WM_NCRBUTTONUP = 0x0A5
WM_NCRBUTTONDBLCLK = 0x0A6
WM_NCMBUTTONDOWN = 0x0A7
WM_NCMBUTTONUP = 0x0A8
WM_NCMBUTTONDBLCLK = 0x0A9
WM_KEYFIRST = 0x100
WM_KEYDOWN = 0x100
WM_KEYUP = 0x101
WM_CHAR = 0x102
WM_DEADCHAR = 0x103
WM_SYSKEYDOWN = 0x104
WM_SYSKEYUP = 0x105
WM_SYSCHAR = 0x106
WM_SYSDEADCHAR = 0x107
WM_KEYLAST = 0x108
WM_INITDIALOG = 0x110
WM_COMMAND = 0x111
WM_SYSCOMMAND = 0x112
WM_TIMER = 0x113
WM_HSCROLL = 0x114
WM_VSCROLL = 0x115
WM_INITMENU = 0x116
WM_INITMENUPOPUP = 0x117
WM_MENUSELECT = 0x11F
WM_MENUCHAR = 0x120
WM_ENTERIDLE = 0x121
WM_CTLCOLORMSGBOX = 0x132
WM_CTLCOLOREDIT = 0x133
WM_CTLCOLORLISTBOX = 0x134
WM_CTLCOLORBTN = 0x135
WM_CTLCOLORDLG = 0x136
WM_CTLCOLORSCROLLBAR = 0x137
WM_CTLCOLORSTATIC = 0x138
WM_MOUSEFIRST = 0x200
WM_MOUSEMOVE = 0x200
WM_LBUTTONDOWN = 0x201
WM_LBUTTONUP = 0x202
WM_LBUTTONDBLCLK = 0x203
WM_RBUTTONDOWN = 0x204
WM_RBUTTONUP = 0x205
WM_RBUTTONDBLCLK = 0x206
WM_MBUTTONDOWN = 0x207
WM_MBUTTONUP = 0x208
WM_MBUTTONDBLCLK = 0x209
WM_MOUSELAST = 0x209
WM_PARENTNOTIFY = 0x210
WM_ENTERMENULOOP = 0x211
WM_EXITMENULOOP = 0x212
WM_MDICREATE = 0x220
WM_MDIDESTROY = 0x221
WM_MDIACTIVATE = 0x222
WM_MDIRESTORE = 0x223
WM_MDINEXT = 0x224
WM_MDIMAXIMIZE = 0x225
WM_MDITILE = 0x226
WM_MDICASCADE = 0x227
WM_MDIICONARRANGE = 0x228
WM_MDIGETACTIVE = 0x229
WM_MDISETMENU = 0x230
WM_DROPFILES = 0x233
WM_MDIREFRESHMENU = 0x234
WM_CUT = 0x300
WM_COPY = 0x301
WM_PASTE = 0x302
WM_CLEAR = 0x303
WM_UNDO = 0x304
WM_RENDERFORMAT = 0x305
WM_RENDERALLFORMATS = 0x306
WM_DESTROYCLIPBOARD = 0x307
WM_DRAWCLIPBOARD = 0x308
WM_PAINTCLIPBOARD = 0x309
WM_VSCROLLCLIPBOARD = 0x30A
WM_SIZECLIPBOARD = 0x30B
WM_ASKCBFORMATNAME = 0x30C
WM_CHANGECBCHAIN = 0x30D
WM_HSCROLLCLIPBOARD = 0x30E
WM_QUERYNEWPALETTE = 0x30F
WM_PALETTEISCHANGING = 0x310
WM_PALETTECHANGED = 0x311
WM_HOTKEY = 0x312
WM_PRINT = 0x317
WM_PRINTCLIENT = 0x318
WM_PENWINFIRST = 0x380
WM_PENWINLAST = 0x38F
#--- Structures ---------------------------------------------------------------
# typedef struct _WINDOWPLACEMENT {
# UINT length;
# UINT flags;
# UINT showCmd;
# POINT ptMinPosition;
# POINT ptMaxPosition;
# RECT rcNormalPosition;
# } WINDOWPLACEMENT;
class WINDOWPLACEMENT(Structure):
_fields_ = [
('length', UINT),
('flags', UINT),
('showCmd', UINT),
('ptMinPosition', POINT),
('ptMaxPosition', POINT),
('rcNormalPosition', RECT),
]
PWINDOWPLACEMENT = POINTER(WINDOWPLACEMENT)
LPWINDOWPLACEMENT = PWINDOWPLACEMENT
# typedef struct tagGUITHREADINFO {
# DWORD cbSize;
# DWORD flags;
# HWND hwndActive;
# HWND hwndFocus;
# HWND hwndCapture;
# HWND hwndMenuOwner;
# HWND hwndMoveSize;
# HWND hwndCaret;
# RECT rcCaret;
# } GUITHREADINFO, *PGUITHREADINFO;
class GUITHREADINFO(Structure):
_fields_ = [
('cbSize', DWORD),
('flags', DWORD),
('hwndActive', HWND),
('hwndFocus', HWND),
('hwndCapture', HWND),
('hwndMenuOwner', HWND),
('hwndMoveSize', HWND),
('hwndCaret', HWND),
('rcCaret', RECT),
]
PGUITHREADINFO = POINTER(GUITHREADINFO)
LPGUITHREADINFO = PGUITHREADINFO
#--- High level classes -------------------------------------------------------
# Point() and Rect() are here instead of gdi32.py because they were mainly
# created to handle window coordinates rather than drawing on the screen.
# XXX not sure if these classes should be psyco-optimized,
# it may not work if the user wants to serialize them for some reason
class Point(object):
"""
Python wrapper over the L{POINT} class.
@type x: int
@ivar x: Horizontal coordinate
@type y: int
@ivar y: Vertical coordinate
"""
def __init__(self, x = 0, y = 0):
"""
@see: L{POINT}
@type x: int
@param x: Horizontal coordinate
@type y: int
@param y: Vertical coordinate
"""
self.x = x
self.y = y
def __iter__(self):
return (self.x, self.y).__iter__()
def __len__(self):
return 2
def __getitem__(self, index):
return (self.x, self.y) [index]
def __setitem__(self, index, value):
if index == 0:
self.x = value
elif index == 1:
self.y = value
else:
raise IndexError("index out of range")
@property
def _as_parameter_(self):
"""
Compatibility with ctypes.
Allows passing transparently a Point object to an API call.
"""
return POINT(self.x, self.y)
def screen_to_client(self, hWnd):
"""
Translates window screen coordinates to client coordinates.
@see: L{client_to_screen}, L{translate}
@type hWnd: int or L{HWND} or L{system.Window}
@param hWnd: Window handle.
@rtype: L{Point}
@return: New object containing the translated coordinates.
"""
return ScreenToClient(hWnd, self)
def client_to_screen(self, hWnd):
"""
Translates window client coordinates to screen coordinates.
@see: L{screen_to_client}, L{translate}
@type hWnd: int or L{HWND} or L{system.Window}
@param hWnd: Window handle.
@rtype: L{Point}
@return: New object containing the translated coordinates.
"""
return ClientToScreen(hWnd, self)
def translate(self, hWndFrom = HWND_DESKTOP, hWndTo = HWND_DESKTOP):
"""
Translate coordinates from one window to another.
@note: To translate multiple points it's more efficient to use the
L{MapWindowPoints} function instead.
@see: L{client_to_screen}, L{screen_to_client}
@type hWndFrom: int or L{HWND} or L{system.Window}
@param hWndFrom: Window handle to translate from.
Use C{HWND_DESKTOP} for screen coordinates.
@type hWndTo: int or L{HWND} or L{system.Window}
@param hWndTo: Window handle to translate to.
Use C{HWND_DESKTOP} for screen coordinates.
@rtype: L{Point}
@return: New object containing the translated coordinates.
"""
return MapWindowPoints(hWndFrom, hWndTo, [self])
class Rect(object):
"""
Python wrapper over the L{RECT} class.
@type left: int
@ivar left: Horizontal coordinate for the top left corner.
@type top: int
@ivar top: Vertical coordinate for the top left corner.
@type right: int
@ivar right: Horizontal coordinate for the bottom right corner.
@type bottom: int
@ivar bottom: Vertical coordinate for the bottom right corner.
@type width: int
@ivar width: Width in pixels. Same as C{right - left}.
@type height: int
@ivar height: Height in pixels. Same as C{bottom - top}.
"""
def __init__(self, left = 0, top = 0, right = 0, bottom = 0):
"""
@see: L{RECT}
@type left: int
@param left: Horizontal coordinate for the top left corner.
@type top: int
@param top: Vertical coordinate for the top left corner.
@type right: int
@param right: Horizontal coordinate for the bottom right corner.
@type bottom: int
@param bottom: Vertical coordinate for the bottom right corner.
"""
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def __iter__(self):
return (self.left, self.top, self.right, self.bottom).__iter__()
    def __len__(self):
        return 4
def __getitem__(self, index):
return (self.left, self.top, self.right, self.bottom) [index]
def __setitem__(self, index, value):
if index == 0:
self.left = value
elif index == 1:
self.top = value
elif index == 2:
self.right = value
elif index == 3:
self.bottom = value
else:
            raise IndexError("index out of range")
@property
def _as_parameter_(self):
"""
Compatibility with ctypes.
        Allows passing transparently a Rect object to an API call.
"""
return RECT(self.left, self.top, self.right, self.bottom)
def __get_width(self):
return self.right - self.left
def __get_height(self):
return self.bottom - self.top
    def __set_width(self, value):
        self.right = self.left + value
    def __set_height(self, value):
        self.bottom = self.top + value
width = property(__get_width, __set_width)
height = property(__get_height, __set_height)
def screen_to_client(self, hWnd):
"""
Translates window screen coordinates to client coordinates.
@see: L{client_to_screen}, L{translate}
@type hWnd: int or L{HWND} or L{system.Window}
@param hWnd: Window handle.
@rtype: L{Rect}
@return: New object containing the translated coordinates.
"""
topleft = ScreenToClient(hWnd, (self.left, self.top))
        bottomright = ScreenToClient(hWnd, (self.right, self.bottom))
return Rect( topleft.x, topleft.y, bottomright.x, bottomright.y )
def client_to_screen(self, hWnd):
"""
Translates window client coordinates to screen coordinates.
@see: L{screen_to_client}, L{translate}
@type hWnd: int or L{HWND} or L{system.Window}
@param hWnd: Window handle.
@rtype: L{Rect}
@return: New object containing the translated coordinates.
"""
topleft = ClientToScreen(hWnd, (self.left, self.top))
        bottomright = ClientToScreen(hWnd, (self.right, self.bottom))
return Rect( topleft.x, topleft.y, bottomright.x, bottomright.y )
def translate(self, hWndFrom = HWND_DESKTOP, hWndTo = HWND_DESKTOP):
"""
Translate coordinates from one window to another.
@see: L{client_to_screen}, L{screen_to_client}
@type hWndFrom: int or L{HWND} or L{system.Window}
@param hWndFrom: Window handle to translate from.
Use C{HWND_DESKTOP} for screen coordinates.
@type hWndTo: int or L{HWND} or L{system.Window}
@param hWndTo: Window handle to translate to.
Use C{HWND_DESKTOP} for screen coordinates.
@rtype: L{Rect}
@return: New object containing the translated coordinates.
"""
points = [ (self.left, self.top), (self.right, self.bottom) ]
return MapWindowPoints(hWndFrom, hWndTo, points)
class WindowPlacement(object):
"""
Python wrapper over the L{WINDOWPLACEMENT} class.
"""
def __init__(self, wp = None):
"""
@type wp: L{WindowPlacement} or L{WINDOWPLACEMENT}
@param wp: Another window placement object.
"""
# Initialize all properties with empty values.
self.flags = 0
self.showCmd = 0
self.ptMinPosition = Point()
self.ptMaxPosition = Point()
self.rcNormalPosition = Rect()
        # If a window placement was given, copy its properties.
if wp:
self.flags = wp.flags
self.showCmd = wp.showCmd
self.ptMinPosition = Point( wp.ptMinPosition.x, wp.ptMinPosition.y )
self.ptMaxPosition = Point( wp.ptMaxPosition.x, wp.ptMaxPosition.y )
self.rcNormalPosition = Rect(
wp.rcNormalPosition.left,
wp.rcNormalPosition.top,
wp.rcNormalPosition.right,
wp.rcNormalPosition.bottom,
)
@property
def _as_parameter_(self):
"""
Compatibility with ctypes.
        Allows passing transparently a WindowPlacement object to an API call.
"""
wp = WINDOWPLACEMENT()
wp.length = sizeof(wp)
wp.flags = self.flags
wp.showCmd = self.showCmd
wp.ptMinPosition.x = self.ptMinPosition.x
wp.ptMinPosition.y = self.ptMinPosition.y
wp.ptMaxPosition.x = self.ptMaxPosition.x
wp.ptMaxPosition.y = self.ptMaxPosition.y
wp.rcNormalPosition.left = self.rcNormalPosition.left
wp.rcNormalPosition.top = self.rcNormalPosition.top
wp.rcNormalPosition.right = self.rcNormalPosition.right
wp.rcNormalPosition.bottom = self.rcNormalPosition.bottom
return wp
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Diamond Light Source <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implementation of Jon Diprose's ImageUploader in Python for
# Formulatrix plate imagers: takes the outputted xml / image files,
# puts them in the correct location, and adds an entry to SynchWeb
import json
import time
import glob
import re
import os
import sys
import atexit
import signal
import errno
import subprocess
import logging
import logging.handlers
import MySQLdb
from PIL import Image
from shutil import copyfile
import xml.etree.ElementTree as ET
import shutil
import getopt
class MySQL:
def __init__(self, user, pw, db, host='127.0.0.1', port=3306):
self._conn = MySQLdb.connect(host=host, user=user, passwd=pw, db=db, port=port)
self._conn.autocommit(1)
self._conn.ping(True)
self._cur = self._conn.cursor(MySQLdb.cursors.DictCursor)
def __del__(self):
if self._cur is not None:
self._cur.close()
if self._conn is not None:
self._conn.close()
def pq(self, query, args=[]):
res = self._cur.execute(query, args)
rows = []
for r in self._cur:
rows.append(r)
return rows if rows else []
def id(self):
return self._cur.connection.insert_id()
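# Minimal usage sketch (hypothetical credentials): rows come back as a list of
# dicts because a DictCursor is used, and id() returns the last AUTO_INCREMENT id.
#   db = MySQL(user='synchweb', pw='secret', db='ispyb')
#   rows = db.pq("SELECT containerid FROM Container WHERE barcode=%s", ['ABC123'])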
class FormulatrixUploader:
_running = True
def __init__(self, db=None, config=None):
self.db = db
self.config = config
for d in ['processed', 'nosample']:
if not os.path.exists(config['holding_dir']+'/'+d):
os.mkdir(config['holding_dir']+'/'+d)
def _move_dir(self, src_dir, target_dir):
"""This will overwrite any existing files with the same names.
Make sure files are completed (written, closed) before moving them."""
logging.getLogger().debug("trying to glob.glob('%s/*')" % src_dir)
files = glob.glob("%s/*" % src_dir)
for f in files:
st = os.stat(f)
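            # treat a file as complete only if it is non-empty and has not been modified for >10 seconds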
if time.time() - st.st_mtime > 10 and st.st_size > 0:
new_f = os.path.join(target_dir, os.path.basename(f))
logging.getLogger().debug('copy: %s to %s' % (f, new_f))
try:
shutil.copyfile(f, new_f)
try:
os.unlink(f)
except IOError as e:
logging.getLogger().error('Error deleting image file %s' % f)
except IOError as e:
logging.getLogger().error('Error copying image file %s to %s' % (f, new_f))
else:
                logging.getLogger().debug('Not moving file %s yet as there may still be a handle on it' % f)
# Remove the src_dir if empty
self._rmdir(src_dir)
def _rmdir(self, dir):
"""rmdir the dir (only works if it's empty)"""
try:
os.rmdir(dir)
except OSError as e:
pass
def _get_most_recent_container_dirs(self, dirs):
"""Generate a dict of all containers with their most recent z-slice directories (dates)"""
containers = dict()
for dir in dirs:
dir_containers = glob.glob(dir+"/*/")
for dir_container in dir_containers:
barcode = os.path.basename(os.path.abspath(dir_container))
containers[barcode] = dir
return containers
def _get_visit_dir(self, container):
visit = container['visit']
proposal = visit[ : visit.index('-')]
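        # e.g. a (hypothetical) visit name like 'cm1234-5' gives proposal 'cm1234'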
new_root = '{root}/{proposal}/{visit}'.format(root=self.config['upload_dir'], proposal=proposal, visit=visit)
old_root = '{root}/{year}/{visit}'.format(root=self.config['upload_dir_old'], year=container['year'], visit=visit)
the_root = None
if os.path.exists(new_root):
the_root = new_root
elif os.path.exists(old_root):
the_root = old_root
else:
            logging.getLogger().error('Visit directory for visit does not exist, tried %s and %s' % (new_root, old_root))
return None
return the_root
def _make_dirs(self, path):
if not os.path.exists(path):
try:
os.makedirs(path)
if config['web_user']:
                    subprocess.call(['/usr/bin/setfacl', '-R', '-m', 'u:'+config['web_user']+':rwx', path])
except OSError as exc:
                if exc.errno == errno.EEXIST and os.path.isdir(path):
                    pass
                elif exc.errno == errno.EACCES:
                    logging.getLogger().error("%s - %s" % (exc.strerror, path))
return False
else:
raise
return True
def handle_zslice_images(self):
"""Move the z-slice images from the configured 'archive_dir' to their target_dir which is a folder
named by the container barcode in the tmp folder in the container's visit dir."""
date_dirs = glob.glob(self.config['archive_dir']+"/*/")
container_dict = self._get_most_recent_container_dirs(date_dirs)
# Move the files in the most recent container imaging dirs within the archive_dir
for barcode in container_dict:
container = self._get_container_by_barcode(barcode)
if container['visit'] is None:
logging.getLogger().error('Container barcode %s has no session' % (str(barcode)) )
continue
# Determine the container's target directory
visit_dir = self._get_visit_dir(container)
if visit_dir is None:
continue
target_dir = os.path.join(visit_dir, "tmp", barcode)
if not self._make_dirs(target_dir):
continue
# Move all the files (overwrite any existing files) in the barcode dir to the target_dir
src_dir = os.path.join(container_dict[barcode], barcode)
self._move_dir(src_dir, target_dir)
# Delete all non-recent container imaging dirs within the archive_dir
recent_container_dirs = []
for barcode in container_dict:
recent_container_dirs.append(os.path.join(container_dict[barcode], barcode))
all_container_dirs = glob.glob(self.config['archive_dir']+"/*/*/")
for dir in all_container_dirs:
# Remove the last character ("/") from the dir when comparing
if dir[:-1] not in recent_container_dirs:
try:
logging.getLogger().debug("trying to rmtree(%s)" % (dir))
shutil.rmtree(dir)
except OSError as oe:
logging.getLogger().error("OSError in shutil.rmtree('%s')" % dir)
# Remove date folders if empty
for dir in date_dirs:
self._rmdir(dir)
def handle_ef_images(self):
"""Move extended focus (EF) images from the configuration holding_dir to
imaging/{containerid}/{inspectionid} within the container's visit dir.
Also create thumbnail images."""
files = glob.glob(self.config['holding_dir']+"/*EF*.xml")
for xml in files:
logging.getLogger().debug(xml)
st = os.stat(xml)
image = xml.replace('.xml', '.jpg')
if not os.path.exists(image):
logging.getLogger().error('Corresponding image not found for %s expected %s' % (str(xml), str(image)) )
continue
if time.time() - st.st_mtime > 10 and st.st_size > 0:
tree = ET.parse(xml)
root = tree.getroot()
# deal with xml namespace
ns = root.tag.split('}')[0].strip('{')
nss = { 'oppf': ns }
inspectionid = re.sub('\-.*', '', root.find('oppf:ImagingId', nss).text)
logging.getLogger().debug('inspection: %s' % str(inspectionid))
container = self._get_container(inspectionid)
if container is None:
continue
# Check if the visit dir exists yet
the_root = self._get_visit_dir(container)
if the_root is None:
continue
# Keep images in visit/imaging/containerid/inspectionid
new_path = '{the_root}/imaging/{containerid}/{inspectionid}'.format(the_root=the_root, containerid=container['containerid'], inspectionid=inspectionid)
if not self._make_dirs(new_path):
continue
position = self._get_position(root.find('oppf:Drop', nss).text, container['containertype'])
if position is None:
logging.getLogger().error('Could not match drop: %s to position: %s' % (root.find('oppf:Drop', nss).text, container['containertype']) )
continue
logging.getLogger().debug('Drop: %s position: %s' % (root.find('oppf:Drop', nss).text, position))
sampleid = self._get_sampleid(position, container['containerid'])
if sampleid is None:
self._move_files(image, xml, 'nosample')
continue
mppx = float(root.find('oppf:SizeInMicrons', nss).find('oppf:Width', nss).text) / float(root.find('oppf:SizeInPixels', nss).find('oppf:Width', nss).text)
mppy = float(root.find('oppf:SizeInMicrons', nss).find('oppf:Height', nss).text) / float(root.find('oppf:SizeInPixels', nss).find('oppf:Height', nss).text)
db.pq("""INSERT INTO BLSampleImage (blsampleid, micronsperpixelx, micronsperpixely, containerinspectionid)
VALUES (%s,%s,%s,%s)""", [sampleid, mppx, mppy, inspectionid])
logging.getLogger().debug("INSERT INTO BLSampleImage "\
"(blsampleid, micronsperpixelx, micronsperpixely, containerinspectionid) "\
"VALUES (%s,%s,%s,%s)" % (str(sampleid), str(mppx), str(mppy), str(inspectionid)))
iid = db.id()
# Use blsampleimageid as file name as we are sure this is unique
new_file = '{path}/{iid}.jpg'.format(path=new_path, iid=iid)
db.pq("""UPDATE BLSampleImage set imagefullpath=%s WHERE blsampleimageid=%s""", [new_file, iid])
logging.getLogger().debug("UPDATE BLSampleImage set imagefullpath=%s WHERE blsampleimageid=%s" % (new_file, str(iid)))
# move image
logging.getLogger().debug('copy: %s to %s' % (image, new_file))
try:
copyfile(image, new_file)
# create a thumbnail
file, ext = os.path.splitext(new_file)
try:
im = Image.open(new_file)
im.thumbnail((config['thumb_width'], config['thumb_height']))
try:
im.save(file+'th'+ext)
except IOError as e:
                            logging.getLogger().error('Error saving image file %s' % (file + 'th' + ext))
# clear up - should be in a try ... except?
#self._move_files(image, xml, 'processed')
try:
os.unlink(image)
except IOError as e:
logging.getLogger().error('Error deleting image file %s' % image)
try:
os.unlink(xml)
except IOError as e:
logging.getLogger().error('Error deleting XML file %s' % xml)
except IOError as e:
logging.getLogger().error('Error opening image file %s' % new_file)
except IOError as e:
logging.getLogger().error('Error copying image file %s to %s' % (image, new_file))
def _move_files(self, image, xml, path):
for f in [image, xml]:
os.rename(f, f.replace(self.config['holding_dir'], self.config['holding_dir']+'/'+path))
logging.getLogger().debug('move %s %s' % (f, f.replace(self.config['holding_dir'], self.config['holding_dir']+'/'+path)))
def _get_container_by_barcode(self, barcode):
container = self.db.pq("""SELECT c.containertype, c.containerid, c.sessionid, CONCAT(p.proposalcode, p.proposalnumber, '-', bs.visit_number) as visit, DATE_FORMAT(c.bltimestamp, '%%Y') as year
FROM Container c
LEFT OUTER JOIN BLSession bs ON bs.sessionid = c.sessionid
LEFT OUTER JOIN Proposal p ON p.proposalid = bs.proposalid
WHERE c.barcode=%s
LIMIT 1""", [barcode])
if not len(container):
logging.getLogger().error('Couldn\'t find container in database for barcode %s' % str(barcode))
return None
logging.getLogger().debug(str(container[0]['visit']))
return container[0]
def _get_container(self, inspectionid):
container = self.db.pq("""SELECT c.containertype, c.containerid, c.sessionid, CONCAT(p.proposalcode, p.proposalnumber, '-', bs.visit_number) as visit, DATE_FORMAT(c.bltimestamp, '%%Y') as year
FROM Container c
INNER JOIN ContainerInspection ci ON ci.containerid = c.containerid
INNER JOIN Dewar d ON d.dewarid = c.dewarid
INNER JOIN Shipping s ON s.shippingid = d.shippingid
INNER JOIN Proposal p ON p.proposalid = s.proposalid
LEFT OUTER JOIN BLSession bs ON bs.sessionid = c.sessionid
WHERE ci.containerinspectionid=%s
LIMIT 1""", [inspectionid])
if not len(container):
logging.getLogger().error('Couldn\'t find container for inspectionid %s' % str(inspectionid))
return
logging.getLogger().debug(str(container))
if not container[0]['sessionid']:
logging.getLogger().error('Container %s has no sessionid. inspectionid is %s ' % (str(container[0]['containerid']), str(inspectionid)))
return
return container[0]
def _get_position(self, text_position, platetype):
well, drop = text_position.split('.')
drop = int(drop)
row = ord(well[0])-65
col = int(well[1:])-1
# Need to know what type of plate this is to know how many columns its got
# This should be in the database, currently in json format embedded in this collection:
# http://ispyb.diamond.ac.uk/beta/client/js/modules/shipment/collections/platetypes.js
if not platetype in self.config['types']:
logging.getLogger().error('Unknown plate type: %s' % platetype)
return
ty = self.config['types'][platetype]
# Position is a linear sequence left to right across the plate
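        # e.g. for a hypothetical plate type with well_per_row=12 and drops_per_well=2,
        # well 'B3' drop 2 gives row=1, col=2, so position = 12*1*2 + 2*2 + (2-1) + 1 = 30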
return (ty['well_per_row']*row*ty['drops_per_well']) + (col*ty['drops_per_well']) + (drop-1) + 1
# Return a blsampleid from a position and containerid
def _get_sampleid(self, position, containerid):
sample = self.db.pq("""SELECT s.blsampleid, s.name, s.location
FROM BLSample s
INNER JOIN Container c ON c.containerid = s.containerid
WHERE s.location = %s AND c.containerid = %s
LIMIT 1""", [position, containerid])
if not len(sample):
logging.getLogger().error('Couldn\'t find a blsample for containerid: %s, position: %s', str(containerid), str(position))
return
logging.getLogger().debug(str(sample[0]))
return sample[0]['blsampleid']
def kill_handler(sig,frame):
hostname = os.uname()[1]
logging.getLogger().warning("%s: got SIGTERM on %s :-O" % (sys.argv[0], hostname))
logging.shutdown()
os._exit(-1)
def set_logging(logs):
levels_dict = {"debug" : logging.DEBUG, "info" : logging.INFO, "warning" : logging.WARNING, "error" : logging.ERROR, "critical" : logging.CRITICAL}
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
for log_name in logs:
handler = None
if log_name == "syslog":
handler = logging.handlers.SysLogHandler(address=(logs[log_name]['host'], logs[log_name]['port']))
elif log_name == "rotating_file":
handler = logging.handlers.RotatingFileHandler(filename=logs[log_name]['filename'], maxBytes=logs[log_name]['max_bytes'], backupCount=logs[log_name]['no_files'])
else:
sys.exit("Invalid logging mechanism defined in config: %s. (Valid options are syslog and rotating_file.)" % log_name)
handler.setFormatter(logging.Formatter(logs[log_name]['format']))
level = logs[log_name]['level']
        if level in levels_dict:
handler.setLevel(levels_dict[level])
else:
handler.setLevel(logging.WARNING)
logger.addHandler(handler)
def clean_up():
global pid_file
os.unlink(pid_file)
logging.getLogger().info("%s: exiting python interpreter :-(" % sys.argv[0])
logging.shutdown()
def print_usage():
usage = """Script for uploading image files from Rock Imager into the correct session directories.
    Syntax: %s [-h] -c <configuration file>
Arguments:
-h|--help : display this help
-c|--conf <conf file> : use the given configuration file, default is config_ef.json""" % sys.argv[0]
print usage
global pid_file
pid_file = None
conf_file = 'config_ef.json'
log_file = None
# Get command-line arguments
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "hc:", ["help", "conf="])
except getopt.GetoptError:
print_usage()
sys.exit(2)
for o,a in opts:
if o in ("-h", "--help"):
print_usage()
sys.exit()
elif o in ("-c", "--conf"):
conf_file = a
cf = open(conf_file)
config = json.load(cf)
cf.close()
if config['task'] not in ('EF', 'Z'):
print_usage()
sys.exit()
set_logging(config['logging'])
signal.signal(signal.SIGTERM, kill_handler)
# Create a pid file
pid_file = config['pid_file']
if os.path.isfile(pid_file):
logging.getLogger().error("%s already exists, exiting" % pid_file)
sys.exit()
if pid_file != None:
try:
f = open(pid_file, 'w')
f.write(str(os.getpid()))
f.close()
except:
logging.getLogger().error("Unable to write to pid file %s" % pid_file)
atexit.register(clean_up) # Remove pid file when exiting
atexit.register(logging.shutdown)
db = MySQL(user=config['user'], pw=config['pw'], db=config['db'], host=config['host'], port=int(config['port']))
uploader = FormulatrixUploader(db=db, config=config)
if config['task'] == 'EF':
uploader.handle_ef_images()
elif config['task'] == 'Z':
uploader.handle_zslice_images()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import List, Optional
from flask_appbuilder import Model
from flask_appbuilder.security.sqla.models import User
from superset import appbuilder
from superset.connectors.sqla.models import SqlaTable, sqlatable_user
from superset.models.core import Database
from superset.models.dashboard import (
Dashboard,
dashboard_slices,
dashboard_user,
DashboardRoles,
)
from superset.models.slice import Slice, slice_user
from tests.integration_tests.dashboards.dashboard_test_utils import (
random_slug,
random_str,
random_title,
)
logger = logging.getLogger(__name__)
session = appbuilder.get_session
inserted_dashboards_ids = []
inserted_databases_ids = []
inserted_sqltables_ids = []
inserted_slices_ids = []
def create_dashboard_to_db(
dashboard_title: Optional[str] = None,
slug: Optional[str] = None,
published: bool = False,
owners: Optional[List[User]] = None,
slices: Optional[List[Slice]] = None,
css: str = "",
json_metadata: str = "",
position_json: str = "",
) -> Dashboard:
dashboard = create_dashboard(
dashboard_title,
slug,
published,
owners,
slices,
css,
json_metadata,
position_json,
)
insert_model(dashboard)
inserted_dashboards_ids.append(dashboard.id)
return dashboard
def create_dashboard(
dashboard_title: Optional[str] = None,
slug: Optional[str] = None,
published: bool = False,
owners: Optional[List[User]] = None,
slices: Optional[List[Slice]] = None,
css: str = "",
json_metadata: str = "",
position_json: str = "",
) -> Dashboard:
dashboard_title = dashboard_title if dashboard_title is not None else random_title()
slug = slug if slug is not None else random_slug()
owners = owners if owners is not None else []
slices = slices if slices is not None else []
return Dashboard(
dashboard_title=dashboard_title,
slug=slug,
published=published,
owners=owners,
css=css,
position_json=position_json,
json_metadata=json_metadata,
slices=slices,
)
def insert_model(dashboard: Model) -> None:
session.add(dashboard)
session.commit()
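    # refresh so database-generated fields (e.g. the autoincrement id) are populated on the instance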
session.refresh(dashboard)
def create_slice_to_db(
name: Optional[str] = None,
datasource_id: Optional[int] = None,
owners: Optional[List[User]] = None,
) -> Slice:
slice_ = create_slice(datasource_id, name=name, owners=owners)
insert_model(slice_)
inserted_slices_ids.append(slice_.id)
return slice_
def create_slice(
datasource_id: Optional[int] = None,
datasource: Optional[SqlaTable] = None,
name: Optional[str] = None,
owners: Optional[List[User]] = None,
) -> Slice:
name = name if name is not None else random_str()
owners = owners if owners is not None else []
datasource_type = "table"
if datasource:
return Slice(
slice_name=name,
table=datasource,
owners=owners,
datasource_type=datasource_type,
)
datasource_id = (
datasource_id
if datasource_id is not None
else create_datasource_table_to_db(name=name + "_table").id
)
return Slice(
slice_name=name,
datasource_id=datasource_id,
owners=owners,
datasource_type=datasource_type,
)
def create_datasource_table_to_db(
name: Optional[str] = None,
db_id: Optional[int] = None,
owners: Optional[List[User]] = None,
) -> SqlaTable:
sqltable = create_datasource_table(name, db_id, owners=owners)
insert_model(sqltable)
inserted_sqltables_ids.append(sqltable.id)
return sqltable
def create_datasource_table(
name: Optional[str] = None,
db_id: Optional[int] = None,
database: Optional[Database] = None,
owners: Optional[List[User]] = None,
) -> SqlaTable:
name = name if name is not None else random_str()
owners = owners if owners is not None else []
if database:
return SqlaTable(table_name=name, database=database, owners=owners)
db_id = db_id if db_id is not None else create_database_to_db(name=name + "_db").id
return SqlaTable(table_name=name, database_id=db_id, owners=owners)
def create_database_to_db(name: Optional[str] = None) -> Database:
database = create_database(name)
insert_model(database)
inserted_databases_ids.append(database.id)
return database
def create_database(name: Optional[str] = None) -> Database:
name = name if name is not None else random_str()
return Database(database_name=name, sqlalchemy_uri="sqlite:///:memory:")
def delete_all_inserted_objects() -> None:
delete_all_inserted_dashboards()
delete_all_inserted_slices()
delete_all_inserted_tables()
delete_all_inserted_dbs()
def delete_all_inserted_dashboards():
try:
dashboards_to_delete: List[Dashboard] = session.query(Dashboard).filter(
Dashboard.id.in_(inserted_dashboards_ids)
).all()
for dashboard in dashboards_to_delete:
try:
delete_dashboard(dashboard, False)
except Exception as ex:
logger.error(f"failed to delete {dashboard.id}", exc_info=True)
raise ex
if len(inserted_dashboards_ids) > 0:
session.commit()
inserted_dashboards_ids.clear()
except Exception as ex2:
logger.error("delete_all_inserted_dashboards failed", exc_info=True)
raise ex2
def delete_dashboard(dashboard: Dashboard, do_commit: bool = False) -> None:
logger.info(f"deleting dashboard{dashboard.id}")
delete_dashboard_roles_associations(dashboard)
delete_dashboard_users_associations(dashboard)
delete_dashboard_slices_associations(dashboard)
session.delete(dashboard)
if do_commit:
session.commit()
def delete_dashboard_users_associations(dashboard: Dashboard) -> None:
session.execute(
dashboard_user.delete().where(dashboard_user.c.dashboard_id == dashboard.id)
)
def delete_dashboard_roles_associations(dashboard: Dashboard) -> None:
session.execute(
DashboardRoles.delete().where(DashboardRoles.c.dashboard_id == dashboard.id)
)
def delete_dashboard_slices_associations(dashboard: Dashboard) -> None:
session.execute(
dashboard_slices.delete().where(dashboard_slices.c.dashboard_id == dashboard.id)
)
def delete_all_inserted_slices():
try:
slices_to_delete: List[Slice] = session.query(Slice).filter(
Slice.id.in_(inserted_slices_ids)
).all()
        for slice_ in slices_to_delete:
            try:
                delete_slice(slice_, False)
            except Exception as ex:
                logger.error(f"failed to delete {slice_.id}", exc_info=True)
raise ex
if len(inserted_slices_ids) > 0:
session.commit()
inserted_slices_ids.clear()
except Exception as ex2:
logger.error("delete_all_inserted_slices failed", exc_info=True)
raise ex2
def delete_slice(slice_: Slice, do_commit: bool = False) -> None:
logger.info(f"deleting slice{slice_.id}")
delete_slice_users_associations(slice_)
session.delete(slice_)
if do_commit:
session.commit()
def delete_slice_users_associations(slice_: Slice) -> None:
session.execute(slice_user.delete().where(slice_user.c.slice_id == slice_.id))
def delete_all_inserted_tables():
try:
tables_to_delete: List[SqlaTable] = session.query(SqlaTable).filter(
SqlaTable.id.in_(inserted_sqltables_ids)
).all()
for table in tables_to_delete:
try:
delete_sqltable(table, False)
except Exception as ex:
logger.error(f"failed to delete {table.id}", exc_info=True)
raise ex
if len(inserted_sqltables_ids) > 0:
session.commit()
inserted_sqltables_ids.clear()
except Exception as ex2:
logger.error("delete_all_inserted_tables failed", exc_info=True)
raise ex2
def delete_sqltable(table: SqlaTable, do_commit: bool = False) -> None:
logger.info(f"deleting table{table.id}")
delete_table_users_associations(table)
session.delete(table)
if do_commit:
session.commit()
def delete_table_users_associations(table: SqlaTable) -> None:
session.execute(
sqlatable_user.delete().where(sqlatable_user.c.table_id == table.id)
)
def delete_all_inserted_dbs():
try:
dbs_to_delete: List[Database] = session.query(Database).filter(
Database.id.in_(inserted_databases_ids)
).all()
for db in dbs_to_delete:
try:
delete_database(db, False)
except Exception as ex:
logger.error(f"failed to delete {db.id}", exc_info=True)
raise ex
if len(inserted_databases_ids) > 0:
session.commit()
inserted_databases_ids.clear()
except Exception as ex2:
logger.error("delete_all_inserted_databases failed", exc_info=True)
raise ex2
def delete_database(database: Database, do_commit: bool = False) -> None:
logger.info(f"deleting database{database.id}")
session.delete(database)
if do_commit:
session.commit()
|
|
"""
Hadoop task
Install and configure hadoop -- requires that Ceph is already installed and
already running.
"""
from cStringIO import StringIO
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.parallel import parallel
from ..orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def validate_cluster(ctx):
"""
Check that there is exactly one master and at least one slave configured
"""
    log.info('Validating Hadoop configuration')
slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
if (len(slaves.remotes) < 1):
raise Exception("At least one hadoop.slave must be specified")
else:
log.info(str(len(slaves.remotes)) + " slaves specified")
masters = ctx.cluster.only(teuthology.is_type('hadoop.master'))
if (len(masters.remotes) == 1):
pass
else:
raise Exception(
"Exactly one hadoop.master must be specified. Currently there are "
+ str(len(masters.remotes)))
try:
yield
finally:
pass
def write_hadoop_env(ctx):
"""
Add required entries to conf/hadoop-env.sh
"""
hadoop_envfile = "{tdir}/apache_hadoop/conf/hadoop-env.sh".format(
tdir=teuthology.get_testdir(ctx))
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote, hadoop_envfile,
'''export JAVA_HOME=/usr/lib/jvm/default-java
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/share/java/libcephfs.jar:{tdir}/apache_hadoop/build/hadoop-core*.jar:{tdir}/inktank_hadoop/build/hadoop-cephfs.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
'''.format(tdir=teuthology.get_testdir(ctx)))
log.info("wrote file: " + hadoop_envfile + " to host: " + str(remote))
def write_core_site(ctx, config):
"""
Add required entries to conf/core-site.xml
"""
testdir = teuthology.get_testdir(ctx)
core_site_file = "{tdir}/apache_hadoop/conf/core-site.xml".format(
tdir=testdir)
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
# check the config to see if we should use hdfs or ceph
default_fs_string = ""
if config.get('hdfs'):
default_fs_string = 'hdfs://{master_ip}:54310'.format(
master_ip=get_hadoop_master_ip(ctx))
else:
default_fs_string = 'ceph:///'
teuthology.write_file(remote, core_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/tmp/hadoop/tmp</value>
</property>
<property>
<name>fs.default.name</name>
<value>{default_fs}</value>
</property>
<property>
<name>ceph.conf.file</name>
<value>/etc/ceph/ceph.conf</value>
</property>
<property>
<name>fs.ceph.impl</name>
<value>org.apache.hadoop.fs.ceph.CephFileSystem</value>
</property>
</configuration>
'''.format(tdir=teuthology.get_testdir(ctx), default_fs=default_fs_string))
log.info("wrote file: " + core_site_file + " to host: " + str(remote))
def get_hadoop_master_ip(ctx):
"""
finds the hadoop.master in the ctx and then pulls out just the IP address
"""
remote, _ = _get_master(ctx)
master_name, master_port = remote.ssh.get_transport().getpeername()
log.info('master name: {name} port {port}'.format(name=master_name,
port=master_port))
return master_name
def write_mapred_site(ctx):
"""
Add required entries to conf/mapred-site.xml
"""
mapred_site_file = "{tdir}/apache_hadoop/conf/mapred-site.xml".format(
tdir=teuthology.get_testdir(ctx))
master_ip = get_hadoop_master_ip(ctx)
log.info('adding host {remote} as jobtracker'.format(remote=master_ip))
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote, mapred_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>{remote}:54311</value>
</property>
</configuration>
'''.format(remote=master_ip))
log.info("wrote file: " + mapred_site_file + " to host: " + str(remote))
def write_hdfs_site(ctx):
"""
Add required entries to conf/hdfs-site.xml
"""
hdfs_site_file = "{tdir}/apache_hadoop/conf/hdfs-site.xml".format(
tdir=teuthology.get_testdir(ctx))
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote, hdfs_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
''')
log.info("wrote file: " + hdfs_site_file + " to host: " + str(remote))
def write_slaves(ctx):
"""
Add required entries to conf/slaves
These nodes host TaskTrackers and DataNodes
"""
log.info('Setting up slave nodes...')
slaves_file = "{tdir}/apache_hadoop/conf/slaves".format(
tdir=teuthology.get_testdir(ctx))
tmp_file = StringIO()
slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
for remote in slaves.remotes:
tmp_file.write('{remote}\n'.format(
remote=remote.ssh.get_transport().getpeername()[0]))
tmp_file.seek(0)
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote=remote, path=slaves_file, data=tmp_file)
tmp_file.seek(0)
log.info("wrote file: " + slaves_file + " to host: " + str(remote))
def write_master(ctx):
"""
Add required entries to conf/masters
These nodes host JobTrackers and Namenodes
"""
masters_file = "{tdir}/apache_hadoop/conf/masters".format(
tdir=teuthology.get_testdir(ctx))
master = _get_master(ctx)
master_remote, _ = master
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote, masters_file, '{master_host}\n'.format(
master_host=master_remote.ssh.get_transport().getpeername()[0]))
log.info("wrote file: " + masters_file + " to host: " + str(remote))
def _configure_hadoop(ctx, config):
"""
Call the various functions that configure Hadoop
"""
log.info('writing out config files')
write_hadoop_env(ctx)
write_core_site(ctx, config)
write_mapred_site(ctx)
write_hdfs_site(ctx)
write_slaves(ctx)
write_master(ctx)
@contextlib.contextmanager
def configure_hadoop(ctx, config):
"""
Call the various functions that configure Hadoop, and handle the
startup of hadoop and clean up of temporary files if this is an hdfs.
"""
_configure_hadoop(ctx, config)
log.info('config.get(hdfs): {hdfs}'.format(hdfs=config.get('hdfs')))
if config.get('hdfs'):
log.info('hdfs option specified. Setting up hdfs')
# let's run this from the master
master = _get_master(ctx)
remote, _ = master
remote.run(
args=["{tdir}/apache_hadoop/bin/hadoop".format(
tdir=teuthology.get_testdir(ctx)),
"namenode",
"-format"],
wait=True,
)
log.info('done setting up hadoop')
try:
yield
finally:
log.info('Removing hdfs directory')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'/tmp/hadoop',
],
wait=False,
),
)
def _start_hadoop(ctx, remote, config):
"""
remotely start hdfs if specified, and start mapred.
"""
testdir = teuthology.get_testdir(ctx)
if config.get('hdfs'):
remote.run(
args=['{tdir}/apache_hadoop/bin/start-dfs.sh'.format(
tdir=testdir), ],
wait=True,
)
log.info('done starting hdfs')
remote.run(
args=['{tdir}/apache_hadoop/bin/start-mapred.sh'.format(
tdir=testdir), ],
wait=True,
)
log.info('done starting mapred')
def _stop_hadoop(ctx, remote, config):
"""
    Remotely stop mapred and, if hdfs is specified, stop the hdfs handler too.
"""
testdir = teuthology.get_testdir(ctx)
remote.run(
args=['{tdir}/apache_hadoop/bin/stop-mapred.sh'.format(tdir=testdir), ],
wait=True,
)
if config.get('hdfs'):
remote.run(
args=['{tdir}/apache_hadoop/bin/stop-dfs.sh'.format(
tdir=testdir), ],
wait=True,
)
log.info('done stopping hadoop')
def _get_master(ctx):
"""
Return the hadoop master. If more than one is found, fail an assertion
"""
master = ctx.cluster.only(teuthology.is_type('hadoop.master'))
assert 1 == len(master.remotes.items()), \
'There must be exactly 1 hadoop.master configured'
return master.remotes.items()[0]
@contextlib.contextmanager
def start_hadoop(ctx, config):
"""
Handle the starting and stopping of hadoop
"""
master = _get_master(ctx)
remote, _ = master
log.info('Starting hadoop on {remote}\n'.format(
remote=remote.ssh.get_transport().getpeername()[0]))
_start_hadoop(ctx, remote, config)
try:
yield
finally:
log.info('Running stop-mapred.sh on {remote}'.format(
remote=remote.ssh.get_transport().getpeername()[0]))
_stop_hadoop(ctx, remote, config)
def _download_apache_hadoop_bins(ctx, remote, hadoop_url):
"""
download and untar the most recent apache hadoop binaries into
{testdir}/apache_hadoop
"""
log.info(
'_download_apache_hadoop_bins: path {path} on host {host}'.format(
path=hadoop_url, host=str(remote)))
file_name = 'apache-hadoop.tgz'
testdir = teuthology.get_testdir(ctx)
remote.run(
args=[
'mkdir', '-p', '-m0755',
'{tdir}/apache_hadoop'.format(tdir=testdir),
run.Raw('&&'),
'echo',
'{file_name}'.format(file_name=file_name),
run.Raw('|'),
'wget',
'-nv',
'-O-',
'--base={url}'.format(url=hadoop_url),
# need to use --input-file to make wget respect --base
'--input-file=-',
run.Raw('|'),
'tar', '-xzf', '-', '-C',
'{tdir}/apache_hadoop'.format(tdir=testdir),
],
)
def _download_inktank_hadoop_bins(ctx, remote, hadoop_url):
"""
download and untar the most recent Inktank hadoop binaries into
{testdir}/hadoop
"""
log.info(
'_download_inktank_hadoop_bins: path {path} on host {host}'.format(
path=hadoop_url, host=str(remote)))
file_name = 'hadoop.tgz'
testdir = teuthology.get_testdir(ctx)
remote.run(
args=[
'mkdir', '-p', '-m0755',
'{tdir}/inktank_hadoop'.format(tdir=testdir),
run.Raw('&&'),
'echo',
'{file_name}'.format(file_name=file_name),
run.Raw('|'),
'wget',
'-nv',
'-O-',
'--base={url}'.format(url=hadoop_url),
# need to use --input-file to make wget respect --base
'--input-file=-',
run.Raw('|'),
'tar', '-xzf', '-', '-C',
'{tdir}/inktank_hadoop'.format(tdir=testdir),
],
)
def _copy_hadoop_cephfs_jars(ctx, remote, from_dir, to_dir):
"""
copy hadoop-cephfs.jar and hadoop-cephfs-test.jar into apache_hadoop
"""
testdir = teuthology.get_testdir(ctx)
log.info('copy jars from {from_dir} to {to_dir} on host {host}'.format(
from_dir=from_dir, to_dir=to_dir, host=str(remote)))
file_names = ['hadoop-cephfs.jar', 'hadoop-cephfs-test.jar']
for file_name in file_names:
log.info('Copying file {file_name}'.format(file_name=file_name))
remote.run(
args=['cp', '{tdir}/{from_dir}/{file_name}'.format(
tdir=testdir, from_dir=from_dir, file_name=file_name),
'{tdir}/{to_dir}/'.format(tdir=testdir, to_dir=to_dir)
],
)
def _node_binaries(ctx, remote, inktank_hadoop_bindir_url,
apache_hadoop_bindir_url):
"""
Download and copy over the appropriate binaries and jar files.
The calls from binaries() end up spawning this function on remote sites.
"""
_download_inktank_hadoop_bins(ctx, remote, inktank_hadoop_bindir_url)
_download_apache_hadoop_bins(ctx, remote, apache_hadoop_bindir_url)
_copy_hadoop_cephfs_jars(ctx, remote, 'inktank_hadoop/build',
'apache_hadoop/build')
@contextlib.contextmanager
def binaries(ctx, config):
"""
Fetch the binaries from the gitbuilder, and spawn the download tasks on
the remote machines.
"""
path = config.get('path')
if path is None:
# fetch Apache Hadoop from gitbuilder
log.info(
'Fetching and unpacking Apache Hadoop binaries from gitbuilder...')
apache_sha1, apache_hadoop_bindir_url = teuthology.get_ceph_binary_url(
package='apache-hadoop',
branch=config.get('apache_branch'),
tag=config.get('tag'),
sha1=config.get('sha1'),
flavor=config.get('flavor'),
format=config.get('format'),
dist=config.get('dist'),
arch=config.get('arch'),
)
log.info('apache_hadoop_bindir_url %s' % (apache_hadoop_bindir_url))
ctx.summary['apache-hadoop-sha1'] = apache_sha1
# fetch Inktank Hadoop from gitbuilder
log.info(
'Fetching and unpacking Inktank Hadoop binaries from gitbuilder...')
inktank_sha1, inktank_hadoop_bindir_url = \
teuthology.get_ceph_binary_url(
package='hadoop',
branch=config.get('inktank_branch'),
tag=config.get('tag'),
sha1=config.get('sha1'),
flavor=config.get('flavor'),
format=config.get('format'),
dist=config.get('dist'),
arch=config.get('arch'),
)
log.info('inktank_hadoop_bindir_url %s' % (inktank_hadoop_bindir_url))
ctx.summary['inktank-hadoop-sha1'] = inktank_sha1
else:
raise Exception(
"The hadoop task does not support the path argument at present")
with parallel() as parallel_task:
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
# these can happen independently
for remote in hadoop_nodes.remotes.iterkeys():
parallel_task.spawn(_node_binaries, ctx, remote,
inktank_hadoop_bindir_url, apache_hadoop_bindir_url)
try:
yield
finally:
log.info('Removing hadoop binaries...')
run.wait(
ctx.cluster.run(
args=['rm', '-rf', '--', '{tdir}/apache_hadoop'.format(
tdir=teuthology.get_testdir(ctx))],
wait=False,
),
)
run.wait(
ctx.cluster.run(
args=['rm', '-rf', '--', '{tdir}/inktank_hadoop'.format(
tdir=teuthology.get_testdir(ctx))],
wait=False,
),
)
@contextlib.contextmanager
def out_of_safemode(ctx, config):
"""
A Hadoop NameNode will stay in safe mode for 30 seconds by default.
    If hdfs is enabled, this blocks until the NameNode has left safe mode; otherwise it does nothing.
"""
if config.get('hdfs'):
log.info('Waiting for the Namenode to exit safe mode...')
master = _get_master(ctx)
remote, _ = master
remote.run(
args=["{tdir}/apache_hadoop/bin/hadoop".format(
tdir=teuthology.get_testdir(ctx)),
"dfsadmin",
"-safemode",
"wait"],
wait=True,
)
else:
pass
try:
yield
finally:
pass
@contextlib.contextmanager
def task(ctx, config):
"""
Set up and tear down a Hadoop cluster.
    This depends either on having Ceph installed prior to Hadoop, like so:
roles:
- [mon.0, mds.0, osd.0, hadoop.master.0]
- [mon.1, osd.1, hadoop.slave.0]
- [mon.2, hadoop.slave.1]
tasks:
- ceph:
- hadoop:
    Or, if you want to use HDFS under Hadoop, this will configure Hadoop
    for HDFS and start it along with MapReduce. Note that this does not
    require Ceph to be installed.
roles:
- [hadoop.master.0]
- [hadoop.slave.0]
- [hadoop.slave.1]
tasks:
- hadoop:
hdfs: True
    This task requires that exactly one hadoop.master be specified
    and at least one hadoop.slave.
    This task does *not* run any jobs on the Hadoop cluster it sets up.
    To run wordcount, you could use pexec like so (after the hadoop task):
- pexec:
hadoop.slave.0:
- mkdir -p /tmp/hadoop_input
- wget http://ceph.com/qa/hadoop_input_files.tar -O /tmp/hadoop_input/files.tar
- cd /tmp/hadoop_input/; tar -xf /tmp/hadoop_input/files.tar
- {tdir}/hadoop/bin/hadoop fs -mkdir wordcount_input
- {tdir}/hadoop/bin/hadoop fs -put /tmp/hadoop_input/*txt wordcount_input/
- {tdir}/hadoop/bin/hadoop jar {tdir}/hadoop/build/hadoop-example*jar wordcount wordcount_input wordcount_output
- rm -rf /tmp/hadoop_input
Note: {tdir} in the above example is the teuthology test directory.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
"task hadoop only supports a dictionary for configuration"
dist = 'precise'
format_type = 'jar'
arch = 'x86_64'
flavor = config.get('flavor', 'basic')
ctx.summary['flavor'] = flavor
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('hadoop', {}))
apache_branch = None
if config.get('apache_hadoop_branch') is not None:
apache_branch = config.get('apache_hadoop_branch')
else:
apache_branch = 'branch-1.0' # hadoop branch to acquire
inktank_branch = None
if config.get('inktank_hadoop_branch') is not None:
inktank_branch = config.get('inktank_hadoop_branch')
else:
inktank_branch = 'cephfs/branch-1.0' # default branch name
# replace any '/' with a '_' to match the artifact paths
inktank_branch = inktank_branch.replace('/', '_')
apache_branch = apache_branch.replace('/', '_')
with contextutil.nested(
lambda: validate_cluster(ctx=ctx),
lambda: binaries(ctx=ctx, config=dict(
tag=config.get('tag'),
sha1=config.get('sha1'),
path=config.get('path'),
flavor=flavor,
dist=config.get('dist', dist),
format=format_type,
arch=arch,
apache_branch=apache_branch,
inktank_branch=inktank_branch,
)),
lambda: configure_hadoop(ctx=ctx, config=config),
lambda: start_hadoop(ctx=ctx, config=config),
lambda: out_of_safemode(ctx=ctx, config=config),
):
yield
|
|
# coding: utf-8
"""
Picarto.TV API Documentation
    The Picarto.TV API documentation. Note: for fixed access tokens, the header that needs to be sent is of the format `Authorization: Bearer yourTokenHere`; this can be generated at https://oauth.picarto.tv/. For the chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details.
OpenAPI spec version: 1.2.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Notification(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'uuid': 'str',
'body': 'str',
'uri': 'str',
'timestamp': 'int'
}
attribute_map = {
'type': 'type',
'uuid': 'uuid',
'body': 'body',
'uri': 'uri',
'timestamp': 'timestamp'
}
def __init__(self, type=None, uuid=None, body=None, uri=None, timestamp=None):
"""
Notification - a model defined in Swagger
"""
self._type = None
self._uuid = None
self._body = None
self._uri = None
self._timestamp = None
if type is not None:
self.type = type
if uuid is not None:
self.uuid = uuid
if body is not None:
self.body = body
if uri is not None:
self.uri = uri
if timestamp is not None:
self.timestamp = timestamp
@property
def type(self):
"""
Gets the type of this Notification.
The notification type
:return: The type of this Notification.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Notification.
The notification type
:param type: The type of this Notification.
:type: str
"""
self._type = type
@property
def uuid(self):
"""
Gets the uuid of this Notification.
The notification UUID
:return: The uuid of this Notification.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""
Sets the uuid of this Notification.
The notification UUID
:param uuid: The uuid of this Notification.
:type: str
"""
self._uuid = uuid
@property
def body(self):
"""
Gets the body of this Notification.
The content of the notification (HTML)
:return: The body of this Notification.
:rtype: str
"""
return self._body
@body.setter
def body(self, body):
"""
Sets the body of this Notification.
The content of the notification (HTML)
:param body: The body of this Notification.
:type: str
"""
self._body = body
@property
def uri(self):
"""
Gets the uri of this Notification.
URI this notification links to (may be unused)
:return: The uri of this Notification.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this Notification.
URI this notification links to (may be unused)
:param uri: The uri of this Notification.
:type: str
"""
self._uri = uri
@property
def timestamp(self):
"""
Gets the timestamp of this Notification.
Unix timestamp of when this notification was created
:return: The timestamp of this Notification.
:rtype: int
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this Notification.
Unix timestamp of when this notification was created
:param timestamp: The timestamp of this Notification.
:type: int
"""
self._timestamp = timestamp
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Notification):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
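# --- Illustrative usage (not part of the generated model) ---
# A minimal sketch of how this model is typically used: populate a
# Notification and serialize it with to_dict(). The field values below are
# invented purely for demonstration.
if __name__ == '__main__':
    example = Notification(
        type='follow',
        uuid='00000000-0000-0000-0000-000000000000',
        body='<b>Someone</b> followed you',
        uri='/someone',
        timestamp=1500000000,
    )
    print(pformat(example.to_dict()))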
|
|
import unittest, json, cPickle as pickle
from cStringIO import StringIO
from scrapy.item import Item, Field
from scrapy.utils.python import str_to_unicode
from scrapy.contrib.exporter import BaseItemExporter, PprintItemExporter, \
PickleItemExporter, CsvItemExporter, XmlItemExporter, JsonLinesItemExporter, \
JsonItemExporter
class TestItem(Item):
name = Field()
age = Field()
class BaseItemExporterTest(unittest.TestCase):
def setUp(self):
self.i = TestItem(name=u'John\xa3', age='22')
self.output = StringIO()
self.ie = self._get_exporter()
def _get_exporter(self, **kwargs):
return BaseItemExporter(**kwargs)
def _check_output(self):
pass
def _assert_expected_item(self, exported_dict):
for k, v in exported_dict.items():
exported_dict[k] = str_to_unicode(v)
self.assertEqual(self.i, exported_dict)
def test_export_item(self):
self.ie.start_exporting()
try:
self.ie.export_item(self.i)
except NotImplementedError:
if self.ie.__class__ is not BaseItemExporter:
raise
self.ie.finish_exporting()
self._check_output()
def test_serialize_field(self):
self.assertEqual(self.ie.serialize_field( \
self.i.fields['name'], 'name', self.i['name']), 'John\xc2\xa3')
self.assertEqual( \
self.ie.serialize_field(self.i.fields['age'], 'age', self.i['age']), '22')
def test_fields_to_export(self):
ie = self._get_exporter(fields_to_export=['name'])
self.assertEqual(list(ie._get_serialized_fields(self.i)), [('name', 'John\xc2\xa3')])
ie = self._get_exporter(fields_to_export=['name'], encoding='latin-1')
name = list(ie._get_serialized_fields(self.i))[0][1]
assert isinstance(name, str)
self.assertEqual(name, 'John\xa3')
def test_field_custom_serializer(self):
def custom_serializer(value):
return str(int(value) + 2)
class CustomFieldItem(Item):
name = Field()
age = Field(serializer=custom_serializer)
i = CustomFieldItem(name=u'John\xa3', age='22')
ie = self._get_exporter()
self.assertEqual(ie.serialize_field(i.fields['name'], 'name', i['name']), 'John\xc2\xa3')
self.assertEqual(ie.serialize_field(i.fields['age'], 'age', i['age']), '24')
class PprintItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PprintItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(eval(self.output.getvalue()))
class PickleItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PickleItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(pickle.loads(self.output.getvalue()))
def test_export_multiple_items(self):
i1 = TestItem(name='hello', age='world')
i2 = TestItem(name='bye', age='world')
f = StringIO()
ie = PickleItemExporter(f)
ie.start_exporting()
ie.export_item(i1)
ie.export_item(i2)
ie.finish_exporting()
f.reset()
self.assertEqual(pickle.load(f), i1)
self.assertEqual(pickle.load(f), i2)
class CsvItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return CsvItemExporter(self.output, **kwargs)
def _check_output(self):
self.assertEqual(self.output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n')
def test_header(self):
output = StringIO()
ie = CsvItemExporter(output, fields_to_export=self.i.fields.keys())
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n')
output = StringIO()
ie = CsvItemExporter(output, fields_to_export=['age'])
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), 'age\r\n22\r\n')
output = StringIO()
ie = CsvItemExporter(output)
ie.start_exporting()
ie.export_item(self.i)
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n22,John\xc2\xa3\r\n')
output = StringIO()
ie = CsvItemExporter(output, include_headers_line=False)
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), '22,John\xc2\xa3\r\n')
def test_join_multivalue(self):
class TestItem2(Item):
name = Field()
friends = Field()
i = TestItem2(name='John', friends=['Mary', 'Paul'])
output = StringIO()
ie = CsvItemExporter(output, include_headers_line=False)
ie.start_exporting()
ie.export_item(i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), '"Mary,Paul",John\r\n')
class XmlItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return XmlItemExporter(self.output, **kwargs)
def _check_output(self):
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n<items><item><age>22</age><name>John\xc2\xa3</name></item></items>'
self.assertEqual(self.output.getvalue(), expected_value)
def test_multivalued_fields(self):
output = StringIO()
item = TestItem(name=[u'John\xa3', u'Doe'])
ie = XmlItemExporter(output)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n<items><item><name><value>John\xc2\xa3</value><value>Doe</value></name></item></items>'
self.assertEqual(output.getvalue(), expected_value)
class JsonLinesItemExporterTest(BaseItemExporterTest):
_expected_nested = {'name': u'Jesus', 'age': {'name': 'Maria', 'age': {'name': 'Joseph', 'age': '22'}}}
def _get_exporter(self, **kwargs):
return JsonLinesItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, dict(self.i))
def test_nested_item(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = TestItem(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
self.assertEqual(exported, self._expected_nested)
class JsonItemExporterTest(JsonLinesItemExporterTest):
_expected_nested = [JsonLinesItemExporterTest._expected_nested]
def _get_exporter(self, **kwargs):
return JsonItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, [dict(self.i)])
def test_two_items(self):
self.ie.start_exporting()
self.ie.export_item(self.i)
self.ie.export_item(self.i)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
self.assertEqual(exported, [dict(self.i), dict(self.i)])
def test_nested_item(self):
i1 = TestItem(name=u'Joseph\xa3', age='22')
i2 = TestItem(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
expected = {'name': u'Jesus', 'age': {'name': 'Maria', 'age': dict(i1)}}
self.assertEqual(exported, [expected])
class CustomItemExporterTest(unittest.TestCase):
def test_exporter_custom_serializer(self):
class CustomItemExporter(BaseItemExporter):
def serialize_field(self, field, name, value):
if name == 'age':
return str(int(value) + 1)
else:
return super(CustomItemExporter, self).serialize_field(field, \
name, value)
i = TestItem(name=u'John', age='22')
ie = CustomItemExporter()
self.assertEqual( \
ie.serialize_field(i.fields['name'], 'name', i['name']), 'John')
self.assertEqual(
ie.serialize_field(i.fields['age'], 'age', i['age']), '23')
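# --- Illustrative usage sketch (not one of the original test cases) ---
# Every exporter above follows the same start/export/finish protocol; this
# helper shows that protocol outside of unittest, assuming the Scrapy 0.x
# exporter API imported at the top of this file.
def _example_export_to_json_lines():
    buf = StringIO()
    exporter = JsonLinesItemExporter(buf)
    exporter.start_exporting()
    exporter.export_item(TestItem(name=u'John', age='22'))
    exporter.finish_exporting()
    return buf.getvalue()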
if __name__ == '__main__':
unittest.main()
|
|
import datetime
import urllib
import pprint
import flask
import requests
from pluss.app import app, full_url_for
from pluss.util.cache import Cache
from pluss.util.config import Config
from pluss.util.db import TokenIdMapping
GOOGLE_API_TIMEOUT = 5
OAUTH2_BASE = 'https://accounts.google.com/o/oauth2'
OAUTH2_SCOPE = 'https://www.googleapis.com/auth/plus.me'
GPLUS_API_ME_ENDPOINT = 'https://www.googleapis.com/plus/v1/people/me'
ACCESS_TOKEN_CACHE_KEY_TEMPLATE = 'pluss--gplusid--oauth--1--%s'
PROFILE_CACHE_KEY_TEMPLATE = 'pluss--gplusid--profile--1--%s'
# Shared session to allow persistent connection pooling
session = requests.Session()
@app.route("/auth")
def auth():
"""Redirect the user to Google to obtain authorization."""
data = {
# Basic OAuth2 parameters
'client_id': Config.get('oauth', 'client-id'),
'redirect_uri': full_url_for('oauth2'),
'scope': OAUTH2_SCOPE,
'response_type': 'code',
# Settings necessary for daemon operation
'access_type': 'offline',
'approval_prompt': 'force',
}
return flask.redirect('%s/auth?%s' % (OAUTH2_BASE, urllib.urlencode(data)))
@app.route("/access_denied")
def denied():
return flask.render_template('denied_main.html')
@app.route("/oauth2callback")
def oauth2():
"""Google redirects the user back to this endpoint to continue the OAuth2 flow."""
# Check for errors from the OAuth2 process
err = flask.request.args.get('error')
if err == 'access_denied':
return flask.redirect(flask.url_for('denied'))
elif err is not None:
app.logger.warning("OAuth2 callback received error: %s", err)
# TODO: handle this better (flash message?)
message = 'Whoops, something went wrong (error=%s). Please try again later.'
return message % flask.escape(err), 500
# Okay, no errors, so we should have a valid authorization code.
# Time to go get our server-side tokens for this user from Google.
    auth_code = flask.request.args.get('code')
if auth_code is None:
return 'Authorization code is missing.', 400 # Bad Request
data = {
'code': auth_code,
'client_id': Config.get('oauth', 'client-id'),
'client_secret': Config.get('oauth', 'client-secret'),
'redirect_uri': full_url_for('oauth2'),
'grant_type': 'authorization_code',
}
try:
response = session.post(OAUTH2_BASE + '/token', data, timeout=GOOGLE_API_TIMEOUT)
except requests.exceptions.Timeout:
app.logger.error('OAuth2 token request timed out.')
# TODO: handle this better (flash message?)
message = 'Whoops, Google took too long to respond. Please try again later.'
return message, 504 # Gateway Timeout
if response.status_code != 200:
app.logger.error('OAuth2 token request got HTTP response %s for code "%s".',
response.status_code, auth_code)
# TODO: handle this better (flash message?)
message = ('Whoops, we failed to finish processing your authorization with Google.'
' Please try again later.')
return message, 401 # Unauthorized
try:
result = response.json()
except ValueError:
app.logger.error('OAuth2 token request got non-JSON response for code "%s".', auth_code)
# TODO: handle this better (flash message?)
message = ('Whoops, we got an invalid response from Google for your authorization.'
' Please try again later.')
return message, 502 # Bad Gateway
# Sanity check: we always expect Bearer tokens.
if result.get('token_type') != 'Bearer':
app.logger.error('OAuth2 token request got unknown token type "%s" for code "%s".',
result['token_type'], auth_code)
# TODO: handle this better (flash message?)
message = ('Whoops, we got an invalid response from Google for your authorization.'
' Please try again later.')
return message, 502 # Bad Gateway
# All non-error responses should have an access token.
access_token = result['access_token']
refresh_token = result.get('refresh_token')
# This is in seconds, but we convert it to an absolute timestamp so that we can
# account for the potential delay it takes to look up the G+ id we should associate
# the access tokens with. (Could be up to GOOGLE_API_TIMEOUT seconds later.)
expiry = datetime.datetime.today() + datetime.timedelta(seconds=result['expires_in'])
try:
person = get_person_by_access_token(access_token)
except UnavailableException as e:
app.logger.error('Unable to finish OAuth2 flow: %r.' % e)
message = ('Whoops, we got an invalid response from Google for your authorization.'
' Please try again later.')
return message, 502 # Bad Gateway
if refresh_token is not None:
TokenIdMapping.update_refresh_token(person['id'], refresh_token)
# Convert the absolute expiry timestamp back into a duration in seconds
expires_in = int((expiry - datetime.datetime.today()).total_seconds())
Cache.set(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % person['id'], access_token, time=expires_in)
# Whew, all done! Set a cookie with the user's G+ id and send them back to the homepage.
app.logger.info("Successfully authenticated G+ id %s.", person['id'])
response = flask.make_response(flask.redirect(flask.url_for('main')))
response.set_cookie('gplus_id', person['id'])
return response
################################################################################
# HELPER FUNCTIONS
################################################################################
# Exception raised by any of the following if they are unable to acquire a result.
class UnavailableException(Exception):
def __init__(self, message, status, *args, **kwargs):
super(UnavailableException, self).__init__(message, status, *args, **kwargs)
self.status = status
def get_person_by_access_token(token):
"""Fetch details about an individual from the G+ API and return a dict with the response."""
headers = {
'Authorization': 'Bearer %s' % token,
}
try:
response = session.get(GPLUS_API_ME_ENDPOINT, headers=headers, timeout=GOOGLE_API_TIMEOUT)
person = response.json()
except requests.exceptions.Timeout:
raise UnavailableException('Person API request timed out.', 504)
except Exception as e:
        raise UnavailableException('Person API request raised exception "%r" for %s.' % (e, pprint.pformat(response.text)), 502)
Cache.set(PROFILE_CACHE_KEY_TEMPLATE % person['id'], person,
time=Config.getint('cache', 'profile-expire'))
return person
def get_person_by_id(gplus_id):
"""A proxy for fetch_person_by_access_token that resolves an id into an access token first."""
# Check the cache first.
person = Cache.get(PROFILE_CACHE_KEY_TEMPLATE % gplus_id)
if person:
return person
# If we don't have them cached, try to get an access token.
access_token = get_access_token_for_id(gplus_id)
    return get_person_by_access_token(access_token)
def get_access_token_for_id(gplus_id):
"""Get an access token for an id, potentially via refresh token if necessary."""
# Check the cache first.
token = Cache.get(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % gplus_id)
if token:
return token
# If we don't have a cached token, see if we have a refresh token available.
refresh_token = TokenIdMapping.lookup_refresh_token(gplus_id)
if not refresh_token:
raise UnavailableException('No tokens available for G+ id %s.' % gplus_id, 401)
data = {
'client_id': Config.get('oauth', 'client-id'),
'client_secret': Config.get('oauth', 'client-secret'),
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
}
try:
response = session.post(OAUTH2_BASE + '/token', data=data, timeout=GOOGLE_API_TIMEOUT)
result = response.json()
except requests.exceptions.Timeout:
raise UnavailableException('Access token API request timed out.', 504)
except Exception as e:
raise UnavailableException('Access token API request raised exception "%r".' % e, 502)
if 'invalid_grant' in result or ('error' in result and result['error'] == 'invalid_grant'):
# The provided refresh token is invalid which means the user has revoked
# access to their content - thus, pluss should forget about them.
Cache.delete(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % gplus_id)
Cache.delete(PROFILE_CACHE_KEY_TEMPLATE % gplus_id)
TokenIdMapping.remove_id(gplus_id)
raise UnavailableException('Access revoked for G+ id %s.' % gplus_id, 502)
elif response.status_code != 200:
app.logger.error('Non-200 response to access token refresh request (%s): "%r".',
response.status_code, result)
raise UnavailableException('Failed to refresh access token for G+ id %s.' % gplus_id, 502)
elif result.get('token_type') != 'Bearer':
app.logger.error('Unknown token type "%s" refreshed for G+ id %s.', result.get('token_type'), gplus_id)
raise UnavailableException('Failed to refresh access token for G+ id %s.' % gplus_id, 502)
token = result['access_token']
Cache.set(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % gplus_id, token, time=result['expires_in'])
return token
def authed_request_for_id(gplus_id, request):
"""Adds the proper access credentials for the specified user and then makes an HTTP request."""
# Helper method to make retry easier
def make_request(retry=True):
token = get_access_token_for_id(gplus_id)
request.headers['Authorization'] = 'Bearer %s' % token
prepared_request = request.prepare()
response = session.send(prepared_request, timeout=GOOGLE_API_TIMEOUT)
if response.status_code == 401:
# Our access token is invalid. If this is the first failure,
# try forcing a refresh of the access token.
if retry:
Cache.delete(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % gplus_id)
return make_request(retry=False)
return response
response = make_request()
if response.status_code == 403:
# Typically used to indicate that Google is rate-limiting the API call
        raise UnavailableException('API 403 response: %r' % response.json(), 503)
elif response.status_code == 401:
raise UnavailableException('Invalid access token.', 401)
elif response.status_code != 200:
raise UnavailableException(
'Unknown API error (code=%d): %r' % (response.status_code, response.json()), 502)
return response
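# --- Illustrative usage sketch (not part of the original module) ---
# Assuming a G+ id whose tokens are already stored, a caller can wrap any
# Google API call with authed_request_for_id(). The helper name and endpoint
# URL below are hypothetical examples, not existing pluss APIs.
def _example_fetch_public_activities(gplus_id):
    request = requests.Request(
        'GET',
        'https://www.googleapis.com/plus/v1/people/me/activities/public')
    response = authed_request_for_id(gplus_id, request)
    return response.json()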
# vim: set ts=4 sts=4 sw=4 et:
|
|
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware VMDK driver volumeops module.
"""
import mock
from cinder.openstack.common import units
from cinder import test
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import volumeops
class VolumeOpsTestCase(test.TestCase):
"""Unit tests for volumeops module."""
MAX_OBJECTS = 100
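    # Passed to VMwareVolumeOps in setUp() and expected to be forwarded as
    # the max_objects argument of the 'get_objects' invoke_api calls asserted
    # in the tests below.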
def setUp(self):
super(VolumeOpsTestCase, self).setUp()
self.session = mock.MagicMock()
self.vops = volumeops.VMwareVolumeOps(self.session, self.MAX_OBJECTS)
def test_split_datastore_path(self):
test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx'
(datastore, folder, file_name) = volumeops.split_datastore_path(test1)
self.assertEqual(datastore, 'datastore1')
self.assertEqual(folder, 'myfolder/mysubfolder/')
self.assertEqual(file_name, 'myvm.vmx')
test2 = '[datastore2 ] myfolder/myvm.vmdk'
(datastore, folder, file_name) = volumeops.split_datastore_path(test2)
self.assertEqual(datastore, 'datastore2')
self.assertEqual(folder, 'myfolder/')
self.assertEqual(file_name, 'myvm.vmdk')
test3 = 'myfolder/myvm.vmdk'
self.assertRaises(IndexError, volumeops.split_datastore_path, test3)
def vm(self, val):
"""Create a mock vm in retrieve result format."""
vm = mock.MagicMock()
prop = mock.Mock(spec=object)
prop.val = val
vm.propSet = [prop]
return vm
def test_get_backing(self):
name = 'mock-backing'
# Test no result
self.session.invoke_api.return_value = None
result = self.vops.get_backing(name)
self.assertIsNone(result)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
# Test single result
vm = self.vm(name)
vm.obj = mock.sentinel.vm_obj
retrieve_result = mock.Mock(spec=object)
retrieve_result.objects = [vm]
self.session.invoke_api.return_value = retrieve_result
self.vops.cancel_retrieval = mock.Mock(spec=object)
result = self.vops.get_backing(name)
self.assertEqual(mock.sentinel.vm_obj, result)
self.session.invoke_api.assert_called_with(vim_util, 'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
self.vops.cancel_retrieval.assert_called_once_with(retrieve_result)
# Test multiple results
retrieve_result2 = mock.Mock(spec=object)
retrieve_result2.objects = [vm('1'), vm('2'), vm('3')]
self.session.invoke_api.return_value = retrieve_result2
self.vops.continue_retrieval = mock.Mock(spec=object)
self.vops.continue_retrieval.return_value = retrieve_result
result = self.vops.get_backing(name)
self.assertEqual(mock.sentinel.vm_obj, result)
self.session.invoke_api.assert_called_with(vim_util, 'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
self.vops.continue_retrieval.assert_called_once_with(retrieve_result2)
self.vops.cancel_retrieval.assert_called_with(retrieve_result)
def test_delete_backing(self):
backing = mock.sentinel.backing
task = mock.sentinel.task
self.session.invoke_api.return_value = task
self.vops.delete_backing(backing)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"Destroy_Task",
backing)
        self.session.wait_for_task.assert_called_once_with(task)
def test_get_host(self):
instance = mock.sentinel.instance
host = mock.sentinel.host
self.session.invoke_api.return_value = host
result = self.vops.get_host(instance)
self.assertEqual(host, result)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
instance,
'runtime.host')
def test_get_hosts(self):
hosts = mock.sentinel.hosts
self.session.invoke_api.return_value = hosts
result = self.vops.get_hosts()
self.assertEqual(hosts, result)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_objects',
self.session.vim,
'HostSystem',
self.MAX_OBJECTS)
def test_continue_retrieval(self):
retrieve_result = mock.sentinel.retrieve_result
self.session.invoke_api.return_value = retrieve_result
result = self.vops.continue_retrieval(retrieve_result)
self.assertEqual(retrieve_result, result)
self.session.invoke_api.assert_called_once_with(vim_util,
'continue_retrieval',
self.session.vim,
retrieve_result)
def test_cancel_retrieval(self):
retrieve_result = mock.sentinel.retrieve_result
self.session.invoke_api.return_value = retrieve_result
result = self.vops.cancel_retrieval(retrieve_result)
self.assertIsNone(result)
self.session.invoke_api.assert_called_once_with(vim_util,
'cancel_retrieval',
self.session.vim,
retrieve_result)
def test_is_usable(self):
mount_info = mock.Mock(spec=object)
mount_info.accessMode = "readWrite"
mount_info.mounted = True
mount_info.accessible = True
self.assertTrue(self.vops._is_usable(mount_info))
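        # Deleting an attribute from a Mock makes later access raise
        # AttributeError, so the deletions below simulate mount info objects
        # that lack the optional 'mounted'/'accessible' properties.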
del mount_info.mounted
self.assertTrue(self.vops._is_usable(mount_info))
mount_info.accessMode = "readonly"
self.assertFalse(self.vops._is_usable(mount_info))
mount_info.accessMode = "readWrite"
mount_info.mounted = False
self.assertFalse(self.vops._is_usable(mount_info))
mount_info.mounted = True
mount_info.accessible = False
self.assertFalse(self.vops._is_usable(mount_info))
del mount_info.accessible
self.assertFalse(self.vops._is_usable(mount_info))
def _create_host_mounts(self, access_mode, host, set_accessible=True,
is_accessible=True, mounted=True):
"""Create host mount value of datastore with single mount info.
:param access_mode: string specifying the read/write permission
:param set_accessible: specify whether accessible property
should be set
:param is_accessible: boolean specifying whether the datastore
is accessible to host
:param host: managed object reference of the connected
host
:return: list of host mount info
"""
mntInfo = mock.Mock(spec=object)
mntInfo.accessMode = access_mode
if set_accessible:
mntInfo.accessible = is_accessible
else:
del mntInfo.accessible
mntInfo.mounted = mounted
host_mount = mock.Mock(spec=object)
host_mount.key = host
host_mount.mountInfo = mntInfo
host_mounts = mock.Mock(spec=object)
host_mounts.DatastoreHostMount = [host_mount]
return host_mounts
def test_get_connected_hosts(self):
with mock.patch.object(self.vops, 'get_summary') as get_summary:
datastore = mock.sentinel.datastore
summary = mock.Mock(spec=object)
get_summary.return_value = summary
summary.accessible = False
hosts = self.vops.get_connected_hosts(datastore)
self.assertEqual([], hosts)
summary.accessible = True
host = mock.Mock(spec=object)
host.value = mock.sentinel.host
host_mounts = self._create_host_mounts("readWrite", host)
self.session.invoke_api.return_value = host_mounts
hosts = self.vops.get_connected_hosts(datastore)
self.assertEqual([mock.sentinel.host], hosts)
self.session.invoke_api.assert_called_once_with(
vim_util,
'get_object_property',
self.session.vim,
datastore,
'host')
del host_mounts.DatastoreHostMount
hosts = self.vops.get_connected_hosts(datastore)
self.assertEqual([], hosts)
def test_is_valid(self):
with mock.patch.object(self.vops, 'get_summary') as get_summary:
summary = mock.Mock(spec=object)
get_summary.return_value = summary
datastore = mock.sentinel.datastore
host = mock.Mock(spec=object)
host.value = mock.sentinel.host
def _is_valid(host_mounts, is_valid):
self.session.invoke_api.return_value = host_mounts
result = self.vops._is_valid(datastore, host)
self.assertEqual(is_valid, result)
self.session.invoke_api.assert_called_with(
vim_util,
'get_object_property',
self.session.vim,
datastore,
'host')
# Test positive cases
summary.maintenanceMode = 'normal'
summary.accessible = True
_is_valid(self._create_host_mounts("readWrite", host), True)
# Test negative cases
_is_valid(self._create_host_mounts("Inaccessible", host), False)
_is_valid(self._create_host_mounts("readWrite", host, True, False),
False)
_is_valid(self._create_host_mounts("readWrite", host, True, True,
False), False)
summary.accessible = False
_is_valid(self._create_host_mounts("readWrite", host, False),
False)
summary.accessible = True
summary.maintenanceMode = 'inMaintenance'
_is_valid(self._create_host_mounts("readWrite", host), False)
def test_get_dss_rp(self):
with mock.patch.object(self.vops, 'get_summary') as get_summary:
summary = mock.Mock(spec=object)
summary.accessible = True
            summary.maintenanceMode = 'normal'
get_summary.return_value = summary
# build out props to be returned by 1st invoke_api call
datastore_prop = mock.Mock(spec=object)
datastore_prop.name = 'datastore'
datastore_prop.val = mock.Mock(spec=object)
datastore_prop.val.ManagedObjectReference = [mock.sentinel.ds1,
mock.sentinel.ds2]
compute_resource_prop = mock.Mock(spec=object)
compute_resource_prop.name = 'parent'
compute_resource_prop.val = mock.sentinel.compute_resource
elem = mock.Mock(spec=object)
elem.propSet = [datastore_prop, compute_resource_prop]
props = [elem]
# build out host_mounts to be returned by 2nd invoke_api call
host = mock.Mock(spec=object)
host.value = mock.sentinel.host
host_mounts = self._create_host_mounts("readWrite", host)
# build out resource_pool to be returned by 3rd invoke_api call
resource_pool = mock.sentinel.resource_pool
# set return values for each call of invoke_api
self.session.invoke_api.side_effect = [props,
host_mounts,
host_mounts,
resource_pool]
# invoke function and verify results
(dss_actual, rp_actual) = self.vops.get_dss_rp(host)
self.assertEqual([mock.sentinel.ds1, mock.sentinel.ds2],
dss_actual)
self.assertEqual(resource_pool, rp_actual)
# invoke function with no valid datastore
summary.maintenanceMode = 'inMaintenance'
self.session.invoke_api.side_effect = [props,
host_mounts,
host_mounts,
resource_pool]
self.assertRaises(error_util.VimException,
self.vops.get_dss_rp,
host)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_get_parent(self):
# Not recursive
child = mock.Mock(spec=object)
child._type = 'Parent'
ret = self.vops._get_parent(child, 'Parent')
self.assertEqual(ret, child)
# Recursive
parent = mock.Mock(spec=object)
parent._type = 'Parent'
child = mock.Mock(spec=object)
child._type = 'Child'
self.session.invoke_api.return_value = parent
ret = self.vops._get_parent(child, 'Parent')
self.assertEqual(ret, parent)
self.session.invoke_api.assert_called_with(vim_util,
'get_object_property',
self.session.vim, child,
'parent')
def test_get_dc(self):
# set up hierarchy of objects
dc = mock.Mock(spec=object)
dc._type = 'Datacenter'
o1 = mock.Mock(spec=object)
o1._type = 'mockType1'
o1.parent = dc
o2 = mock.Mock(spec=object)
o2._type = 'mockType2'
o2.parent = o1
# mock out invoke_api behaviour to fetch parent
def mock_invoke_api(vim_util, method, vim, the_object, arg):
return the_object.parent
self.session.invoke_api.side_effect = mock_invoke_api
ret = self.vops.get_dc(o2)
self.assertEqual(dc, ret)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_get_vmfolder(self):
self.session.invoke_api.return_value = mock.sentinel.ret
ret = self.vops.get_vmfolder(mock.sentinel.dc)
self.assertEqual(mock.sentinel.ret, ret)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
mock.sentinel.dc,
'vmFolder')
def test_create_folder_not_present(self):
"""Test create_folder when child not present."""
parent_folder = mock.sentinel.parent_folder
child_name = 'child_folder'
prop_val = mock.Mock(spec=object)
prop_val.ManagedObjectReference = []
child_folder = mock.sentinel.child_folder
self.session.invoke_api.side_effect = [prop_val, child_folder]
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(child_folder, ret)
expected_invoke_api = [mock.call(vim_util, 'get_object_property',
self.session.vim, parent_folder,
'childEntity'),
mock.call(self.session.vim, 'CreateFolder',
parent_folder, name=child_name)]
self.assertEqual(expected_invoke_api,
self.session.invoke_api.mock_calls)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_create_folder_already_present(self):
"""Test create_folder when child already present."""
parent_folder = mock.sentinel.parent_folder
child_name = 'child_folder'
prop_val = mock.Mock(spec=object)
child_entity_1 = mock.Mock(spec=object)
child_entity_1._type = 'Folder'
child_entity_1_name = 'SomeOtherName'
child_entity_2 = mock.Mock(spec=object)
child_entity_2._type = 'Folder'
child_entity_2_name = child_name
prop_val.ManagedObjectReference = [child_entity_1, child_entity_2]
self.session.invoke_api.side_effect = [prop_val, child_entity_1_name,
child_entity_2_name]
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(child_entity_2, ret)
expected_invoke_api = [mock.call(vim_util, 'get_object_property',
self.session.vim, parent_folder,
'childEntity'),
mock.call(vim_util, 'get_object_property',
self.session.vim, child_entity_1,
'name'),
mock.call(vim_util, 'get_object_property',
self.session.vim, child_entity_2,
'name')]
self.assertEqual(expected_invoke_api,
self.session.invoke_api.mock_calls)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_create_folder_with_special_characters(self):
"""Test create_folder with names containing special characters."""
# Test folder already exists case.
child_entity_1 = mock.Mock(_type='Folder')
child_entity_1_name = 'cinder-volumes'
child_entity_2 = mock.Mock(_type='Folder')
child_entity_2_name = '%2fcinder-volumes'
prop_val = mock.Mock(ManagedObjectReference=[child_entity_1,
child_entity_2])
self.session.invoke_api.side_effect = [prop_val,
child_entity_1_name,
child_entity_2_name]
parent_folder = mock.sentinel.parent_folder
child_name = '/cinder-volumes'
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(child_entity_2, ret)
# Test non-existing folder case.
child_entity_2_name = '%25%25cinder-volumes'
new_child_folder = mock.sentinel.new_child_folder
self.session.invoke_api.side_effect = [prop_val,
child_entity_1_name,
child_entity_2_name,
new_child_folder]
child_name = '%cinder-volumes'
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(new_child_folder, ret)
self.session.invoke_api.assert_called_with(self.session.vim,
'CreateFolder',
parent_folder,
name=child_name)
# Reset side effects.
self.session.invoke_api.side_effect = None
def test_create_disk_backing_thin(self):
backing = mock.Mock()
del backing.eagerlyScrub
cf = self.session.vim.client.factory
cf.create.return_value = backing
disk_type = 'thin'
ret = self.vops._create_disk_backing(disk_type, None)
self.assertEqual(backing, ret)
self.assertIsInstance(ret.thinProvisioned, bool)
self.assertTrue(ret.thinProvisioned)
self.assertEqual('', ret.fileName)
self.assertEqual('persistent', ret.diskMode)
def test_create_disk_backing_thick(self):
backing = mock.Mock()
del backing.eagerlyScrub
del backing.thinProvisioned
cf = self.session.vim.client.factory
cf.create.return_value = backing
disk_type = 'thick'
ret = self.vops._create_disk_backing(disk_type, None)
self.assertEqual(backing, ret)
self.assertEqual('', ret.fileName)
self.assertEqual('persistent', ret.diskMode)
def test_create_disk_backing_eager_zeroed_thick(self):
backing = mock.Mock()
del backing.thinProvisioned
cf = self.session.vim.client.factory
cf.create.return_value = backing
disk_type = 'eagerZeroedThick'
ret = self.vops._create_disk_backing(disk_type, None)
self.assertEqual(backing, ret)
self.assertIsInstance(ret.eagerlyScrub, bool)
self.assertTrue(ret.eagerlyScrub)
self.assertEqual('', ret.fileName)
self.assertEqual('persistent', ret.diskMode)
def test_create_virtual_disk_config_spec(self):
cf = self.session.vim.client.factory
cf.create.side_effect = lambda *args: mock.Mock()
size_kb = units.Ki
controller_key = 200
disk_type = 'thick'
spec = self.vops._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
None)
cf.create.side_effect = None
self.assertEqual('add', spec.operation)
self.assertEqual('create', spec.fileOperation)
device = spec.device
self.assertEqual(size_kb, device.capacityInKB)
self.assertEqual(-101, device.key)
self.assertEqual(0, device.unitNumber)
self.assertEqual(controller_key, device.controllerKey)
backing = device.backing
self.assertEqual('', backing.fileName)
self.assertEqual('persistent', backing.diskMode)
def test_create_specs_for_ide_disk_add(self):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
size_kb = 1
disk_type = 'thin'
adapter_type = 'ide'
ret = self.vops._create_specs_for_disk_add(size_kb, disk_type,
adapter_type)
factory.create.side_effect = None
self.assertEqual(1, len(ret))
self.assertEqual(units.Ki, ret[0].device.capacityInKB)
self.assertEqual(200, ret[0].device.controllerKey)
expected = [mock.call.create('ns0:VirtualDeviceConfigSpec'),
mock.call.create('ns0:VirtualDisk'),
mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo')]
factory.create.assert_has_calls(expected, any_order=True)
def test_create_specs_for_scsi_disk_add(self):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
size_kb = 2 * units.Ki
disk_type = 'thin'
adapter_type = 'lsiLogicsas'
ret = self.vops._create_specs_for_disk_add(size_kb, disk_type,
adapter_type)
factory.create.side_effect = None
self.assertEqual(2, len(ret))
self.assertEqual('noSharing', ret[1].device.sharedBus)
self.assertEqual(size_kb, ret[0].device.capacityInKB)
expected = [mock.call.create('ns0:VirtualLsiLogicSASController'),
mock.call.create('ns0:VirtualDeviceConfigSpec'),
mock.call.create('ns0:VirtualDisk'),
mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo'),
mock.call.create('ns0:VirtualDeviceConfigSpec')]
factory.create.assert_has_calls(expected, any_order=True)
def test_get_create_spec_disk_less(self):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
name = mock.sentinel.name
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
ret = self.vops._get_create_spec_disk_less(name, ds_name, profile_id)
factory.create.side_effect = None
self.assertEqual(name, ret.name)
self.assertEqual('[%s]' % ds_name, ret.files.vmPathName)
self.assertEqual("vmx-08", ret.version)
self.assertEqual(profile_id, ret.vmProfile[0].profileId)
expected = [mock.call.create('ns0:VirtualMachineFileInfo'),
mock.call.create('ns0:VirtualMachineConfigSpec'),
mock.call.create('ns0:VirtualMachineDefinedProfileSpec')]
factory.create.assert_has_calls(expected, any_order=True)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'get_create_spec')
def test_create_backing(self, get_create_spec):
create_spec = mock.sentinel.create_spec
get_create_spec.return_value = create_spec
task = mock.sentinel.task
self.session.invoke_api.return_value = task
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.result
self.session.wait_for_task.return_value = task_info
name = 'backing_name'
size_kb = mock.sentinel.size_kb
disk_type = mock.sentinel.disk_type
adapter_type = mock.sentinel.adapter_type
folder = mock.sentinel.folder
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
ret = self.vops.create_backing(name, size_kb, disk_type, folder,
resource_pool, host, ds_name,
profile_id, adapter_type)
self.assertEqual(mock.sentinel.result, ret)
get_create_spec.assert_called_once_with(name, size_kb, disk_type,
ds_name, profile_id,
adapter_type)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateVM_Task',
folder,
config=create_spec,
pool=resource_pool,
host=host)
self.session.wait_for_task.assert_called_once_with(task)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_create_spec_disk_less')
def test_create_backing_disk_less(self, get_create_spec_disk_less):
create_spec = mock.sentinel.create_spec
get_create_spec_disk_less.return_value = create_spec
task = mock.sentinel.task
self.session.invoke_api.return_value = task
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.result
self.session.wait_for_task.return_value = task_info
name = 'backing_name'
folder = mock.sentinel.folder
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
ret = self.vops.create_backing_disk_less(name, folder, resource_pool,
host, ds_name, profile_id)
self.assertEqual(mock.sentinel.result, ret)
get_create_spec_disk_less.assert_called_once_with(name, ds_name,
profile_id)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateVM_Task',
folder,
config=create_spec,
pool=resource_pool,
host=host)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_datastore(self):
backing = mock.sentinel.backing
datastore = mock.Mock(spec=object)
datastore.ManagedObjectReference = [mock.sentinel.ds]
self.session.invoke_api.return_value = datastore
ret = self.vops.get_datastore(backing)
self.assertEqual(mock.sentinel.ds, ret)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
backing, 'datastore')
def test_get_summary(self):
datastore = mock.sentinel.datastore
summary = mock.sentinel.summary
self.session.invoke_api.return_value = summary
ret = self.vops.get_summary(datastore)
self.assertEqual(summary, ret)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
datastore,
'summary')
def test_get_relocate_spec(self):
delete_disk_attribute = True
def _create_side_effect(type):
obj = mock.Mock()
if type == "ns0:VirtualDiskFlatVer2BackingInfo":
del obj.eagerlyScrub
elif (type == "ns0:VirtualMachineRelocateSpec" and
delete_disk_attribute):
del obj.disk
else:
pass
return obj
factory = self.session.vim.client.factory
factory.create.side_effect = _create_side_effect
datastore = mock.sentinel.datastore
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
disk_move_type = mock.sentinel.disk_move_type
ret = self.vops._get_relocate_spec(datastore, resource_pool, host,
disk_move_type)
self.assertEqual(datastore, ret.datastore)
self.assertEqual(resource_pool, ret.pool)
self.assertEqual(host, ret.host)
self.assertEqual(disk_move_type, ret.diskMoveType)
# Test with disk locator.
delete_disk_attribute = False
disk_type = 'thin'
disk_device = mock.Mock()
ret = self.vops._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
factory.create.side_effect = None
self.assertEqual(datastore, ret.datastore)
self.assertEqual(resource_pool, ret.pool)
self.assertEqual(host, ret.host)
self.assertEqual(disk_move_type, ret.diskMoveType)
self.assertIsInstance(ret.disk, list)
self.assertEqual(1, len(ret.disk))
disk_locator = ret.disk[0]
self.assertEqual(datastore, disk_locator.datastore)
self.assertEqual(disk_device.key, disk_locator.diskId)
backing = disk_locator.diskBackingInfo
self.assertIsInstance(backing.thinProvisioned, bool)
self.assertTrue(backing.thinProvisioned)
self.assertEqual('', backing.fileName)
self.assertEqual('persistent', backing.diskMode)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_disk_device')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_relocate_spec')
def test_relocate_backing(self, get_relocate_spec, get_disk_device):
disk_device = mock.sentinel.disk_device
get_disk_device.return_value = disk_device
spec = mock.sentinel.relocate_spec
get_relocate_spec.return_value = spec
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
datastore = mock.sentinel.datastore
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
disk_type = mock.sentinel.disk_type
self.vops.relocate_backing(backing, datastore, resource_pool, host,
disk_type)
# Verify calls
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
get_disk_device.assert_called_once_with(backing)
get_relocate_spec.assert_called_once_with(datastore, resource_pool,
host, disk_move_type,
disk_type, disk_device)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'RelocateVM_Task',
backing,
spec=spec)
self.session.wait_for_task.assert_called_once_with(task)
def test_move_backing_to_folder(self):
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
folder = mock.sentinel.folder
self.vops.move_backing_to_folder(backing, folder)
# Verify calls
self.session.invoke_api.assert_called_once_with(self.session.vim,
'MoveIntoFolder_Task',
folder,
list=[backing])
self.session.wait_for_task.assert_called_once_with(task)
def test_create_snapshot_operation(self):
task = mock.sentinel.task
self.session.invoke_api.return_value = task
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.result
self.session.wait_for_task.return_value = task_info
backing = mock.sentinel.backing
name = mock.sentinel.name
desc = mock.sentinel.description
quiesce = True
ret = self.vops.create_snapshot(backing, name, desc, quiesce)
self.assertEqual(mock.sentinel.result, ret)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=desc,
memory=False,
quiesce=quiesce)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_snapshot_from_tree(self):
volops = volumeops.VMwareVolumeOps
name = mock.sentinel.name
# Test snapshot == 'None'
ret = volops._get_snapshot_from_tree(name, None)
self.assertIsNone(ret)
# Test root == snapshot
snapshot = mock.sentinel.snapshot
node = mock.Mock(spec=object)
node.name = name
node.snapshot = snapshot
ret = volops._get_snapshot_from_tree(name, node)
self.assertEqual(ret, snapshot)
# Test root.childSnapshotList == None
root = mock.Mock(spec=object)
root.name = 'root'
del root.childSnapshotList
ret = volops._get_snapshot_from_tree(name, root)
self.assertIsNone(ret)
# Test root.child == snapshot
root.childSnapshotList = [node]
ret = volops._get_snapshot_from_tree(name, root)
self.assertEqual(ret, snapshot)
def test_get_snapshot(self):
# build out the root snapshot tree
snapshot_name = mock.sentinel.snapshot_name
snapshot = mock.sentinel.snapshot
root = mock.Mock(spec=object)
root.name = 'root'
node = mock.Mock(spec=object)
node.name = snapshot_name
node.snapshot = snapshot
root.childSnapshotList = [node]
# Test rootSnapshotList is not None
snapshot_tree = mock.Mock(spec=object)
snapshot_tree.rootSnapshotList = [root]
self.session.invoke_api.return_value = snapshot_tree
backing = mock.sentinel.backing
ret = self.vops.get_snapshot(backing, snapshot_name)
self.assertEqual(snapshot, ret)
self.session.invoke_api.assert_called_with(vim_util,
'get_object_property',
self.session.vim,
backing,
'snapshot')
# Test rootSnapshotList == None
snapshot_tree.rootSnapshotList = None
ret = self.vops.get_snapshot(backing, snapshot_name)
self.assertIsNone(ret)
self.session.invoke_api.assert_called_with(vim_util,
'get_object_property',
self.session.vim,
backing,
'snapshot')
def test_snapshot_exists(self):
backing = mock.sentinel.backing
invoke_api = self.session.invoke_api
invoke_api.return_value = None
self.assertFalse(self.vops.snapshot_exists(backing))
invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
backing,
'snapshot')
snapshot = mock.Mock()
invoke_api.return_value = snapshot
snapshot.rootSnapshotList = None
self.assertFalse(self.vops.snapshot_exists(backing))
snapshot.rootSnapshotList = [mock.Mock()]
self.assertTrue(self.vops.snapshot_exists(backing))
def test_delete_snapshot(self):
backing = mock.sentinel.backing
snapshot_name = mock.sentinel.snapshot_name
# Test snapshot is None
with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot:
get_snapshot.return_value = None
self.vops.delete_snapshot(backing, snapshot_name)
get_snapshot.assert_called_once_with(backing, snapshot_name)
# Test snapshot is not None
snapshot = mock.sentinel.snapshot
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot:
get_snapshot.return_value = snapshot
self.vops.delete_snapshot(backing, snapshot_name)
get_snapshot.assert_called_with(backing, snapshot_name)
invoke_api.assert_called_once_with(self.session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_folder(self):
folder = mock.sentinel.folder
backing = mock.sentinel.backing
with mock.patch.object(self.vops, '_get_parent') as get_parent:
get_parent.return_value = folder
ret = self.vops._get_folder(backing)
self.assertEqual(folder, ret)
get_parent.assert_called_once_with(backing, 'Folder')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_relocate_spec')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_disk_device')
def test_get_clone_spec(self, get_disk_device, get_relocate_spec):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
relocate_spec = mock.sentinel.relocate_spec
get_relocate_spec.return_value = relocate_spec
datastore = mock.sentinel.datastore
disk_move_type = mock.sentinel.disk_move_type
snapshot = mock.sentinel.snapshot
disk_type = None
backing = mock.sentinel.backing
ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
backing, disk_type)
self.assertEqual(relocate_spec, ret.location)
self.assertFalse(ret.powerOn)
self.assertFalse(ret.template)
self.assertEqual(snapshot, ret.snapshot)
get_relocate_spec.assert_called_once_with(datastore, None, None,
disk_move_type, disk_type,
None)
disk_device = mock.sentinel.disk_device
get_disk_device.return_value = disk_device
disk_type = 'thin'
ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
backing, disk_type)
factory.create.side_effect = None
self.assertEqual(relocate_spec, ret.location)
self.assertFalse(ret.powerOn)
self.assertFalse(ret.template)
self.assertEqual(snapshot, ret.snapshot)
get_disk_device.assert_called_once_with(backing)
get_relocate_spec.assert_called_with(datastore, None, None,
disk_move_type, disk_type,
disk_device)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_clone_spec')
def test_clone_backing(self, get_clone_spec):
folder = mock.Mock(name='folder', spec=object)
folder._type = 'Folder'
task = mock.sentinel.task
self.session.invoke_api.side_effect = [folder, task, folder, task,
folder, task]
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.new_backing
self.session.wait_for_task.return_value = task_info
clone_spec = mock.sentinel.clone_spec
get_clone_spec.return_value = clone_spec
# Test non-linked clone_backing
name = mock.sentinel.name
backing = mock.Mock(spec=object)
backing._type = 'VirtualMachine'
snapshot = mock.sentinel.snapshot
clone_type = "anything-other-than-linked"
        datastore = mock.sentinel.datastore
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
datastore)
# verify calls
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
backing, None, None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
# Test linked clone_backing
clone_type = volumeops.LINKED_CLONE_TYPE
self.session.invoke_api.reset_mock()
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
datastore)
# verify calls
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'createNewChildDiskBacking'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
backing, None, None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
# Test disk type conversion and target host.
clone_type = None
disk_type = 'thin'
host = mock.sentinel.host
self.session.invoke_api.reset_mock()
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
datastore, disk_type, host)
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
backing, disk_type, host)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
# Clear side effects.
self.session.invoke_api.side_effect = None
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_create_specs_for_disk_add')
def test_attach_disk_to_backing(self, create_spec):
reconfig_spec = mock.Mock()
self.session.vim.client.factory.create.return_value = reconfig_spec
disk_add_config_specs = mock.Mock()
create_spec.return_value = disk_add_config_specs
task = mock.Mock()
self.session.invoke_api.return_value = task
backing = mock.Mock()
size_in_kb = units.Ki
disk_type = "thin"
adapter_type = "ide"
vmdk_ds_file_path = mock.Mock()
self.vops.attach_disk_to_backing(backing, size_in_kb, disk_type,
adapter_type, vmdk_ds_file_path)
self.assertEqual(disk_add_config_specs, reconfig_spec.deviceChange)
create_spec.assert_called_once_with(size_in_kb, disk_type,
adapter_type,
vmdk_ds_file_path)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
def test_rename_backing(self):
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
new_name = mock.sentinel.new_name
self.vops.rename_backing(backing, new_name)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"Rename_Task",
backing,
newName=new_name)
self.session.wait_for_task.assert_called_once_with(task)
def test_change_backing_profile(self):
# Test change to empty profile.
reconfig_spec = mock.Mock()
empty_profile_spec = mock.sentinel.empty_profile_spec
self.session.vim.client.factory.create.side_effect = [
reconfig_spec, empty_profile_spec]
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
unique_profile_id = mock.sentinel.unique_profile_id
profile_id = mock.Mock(uniqueId=unique_profile_id)
self.vops.change_backing_profile(backing, profile_id)
self.assertEqual([empty_profile_spec], reconfig_spec.vmProfile)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
# Test change to non-empty profile.
profile_spec = mock.Mock()
self.session.vim.client.factory.create.side_effect = [
reconfig_spec, profile_spec]
self.session.invoke_api.reset_mock()
self.session.wait_for_task.reset_mock()
self.vops.change_backing_profile(backing, profile_id)
self.assertEqual([profile_spec], reconfig_spec.vmProfile)
self.assertEqual(unique_profile_id,
reconfig_spec.vmProfile[0].profileId)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
# Clear side effects.
self.session.vim.client.factory.create.side_effect = None
def test_delete_file(self):
file_mgr = mock.sentinel.file_manager
self.session.vim.service_content.fileManager = file_mgr
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
# Test delete file
file_path = mock.sentinel.file_path
datacenter = mock.sentinel.datacenter
self.vops.delete_file(file_path, datacenter)
# verify calls
invoke_api.assert_called_once_with(self.session.vim,
'DeleteDatastoreFile_Task',
file_mgr,
name=file_path,
datacenter=datacenter)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_path_name(self):
path = mock.Mock(spec=object)
path_name = mock.sentinel.vm_path_name
path.vmPathName = path_name
invoke_api = self.session.invoke_api
invoke_api.return_value = path
backing = mock.sentinel.backing
ret = self.vops.get_path_name(backing)
self.assertEqual(path_name, ret)
invoke_api.assert_called_once_with(vim_util, 'get_object_property',
self.session.vim, backing,
'config.files')
def test_get_entity_name(self):
entity_name = mock.sentinel.entity_name
invoke_api = self.session.invoke_api
invoke_api.return_value = entity_name
entity = mock.sentinel.entity
ret = self.vops.get_entity_name(entity)
self.assertEqual(entity_name, ret)
invoke_api.assert_called_once_with(vim_util, 'get_object_property',
self.session.vim, entity, 'name')
def test_get_vmdk_path(self):
# Setup hardware_devices for test
device = mock.Mock()
device.__class__.__name__ = 'VirtualDisk'
backing = mock.Mock()
backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo'
backing.fileName = mock.sentinel.vmdk_path
device.backing = backing
invoke_api = self.session.invoke_api
invoke_api.return_value = [device]
# Test get_vmdk_path
ret = self.vops.get_vmdk_path(backing)
self.assertEqual(mock.sentinel.vmdk_path, ret)
invoke_api.assert_called_once_with(vim_util, 'get_object_property',
self.session.vim, backing,
'config.hardware.device')
        backing.__class__.__name__ = 'VirtualDiskSparseVer2BackingInfo'
self.assertRaises(AssertionError, self.vops.get_vmdk_path, backing)
# Test with no disk device.
invoke_api.return_value = []
self.assertRaises(error_util.VirtualDiskNotFoundException,
self.vops.get_vmdk_path,
backing)
def test_get_disk_size(self):
# Test with valid disk device.
device = mock.Mock()
device.__class__.__name__ = 'VirtualDisk'
disk_size_bytes = 1024
device.capacityInKB = disk_size_bytes / units.Ki
invoke_api = self.session.invoke_api
invoke_api.return_value = [device]
self.assertEqual(disk_size_bytes,
self.vops.get_disk_size(mock.sentinel.backing))
# Test with no disk device.
invoke_api.return_value = []
self.assertRaises(error_util.VirtualDiskNotFoundException,
self.vops.get_disk_size,
mock.sentinel.backing)
def test_create_virtual_disk(self):
task = mock.Mock()
invoke_api = self.session.invoke_api
invoke_api.return_value = task
spec = mock.Mock()
factory = self.session.vim.client.factory
factory.create.return_value = spec
disk_mgr = self.session.vim.service_content.virtualDiskManager
dc_ref = mock.Mock()
vmdk_ds_file_path = mock.Mock()
size_in_kb = 1024
adapter_type = 'ide'
disk_type = 'thick'
self.vops.create_virtual_disk(dc_ref, vmdk_ds_file_path, size_in_kb,
adapter_type, disk_type)
self.assertEqual(volumeops.VirtualDiskAdapterType.IDE,
spec.adapterType)
self.assertEqual(volumeops.VirtualDiskType.PREALLOCATED, spec.diskType)
self.assertEqual(size_in_kb, spec.capacityKb)
invoke_api.assert_called_once_with(self.session.vim,
'CreateVirtualDisk_Task',
disk_mgr,
name=vmdk_ds_file_path,
datacenter=dc_ref,
spec=spec)
self.session.wait_for_task.assert_called_once_with(task)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'create_virtual_disk')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'delete_file')
def test_create_flat_extent_virtual_disk_descriptor(self, delete_file,
create_virtual_disk):
dc_ref = mock.Mock()
path = mock.Mock()
size_in_kb = 1024
adapter_type = 'ide'
disk_type = 'thick'
self.vops.create_flat_extent_virtual_disk_descriptor(dc_ref,
path,
size_in_kb,
adapter_type,
disk_type)
create_virtual_disk.assert_called_once_with(
dc_ref, path.get_descriptor_ds_file_path(), size_in_kb,
adapter_type, disk_type)
delete_file.assert_called_once_with(
path.get_flat_extent_ds_file_path(), dc_ref)
def test_copy_vmdk_file(self):
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
dc_ref = self.session.dc_ref
src_vmdk_file_path = self.session.src
dest_vmdk_file_path = self.session.dest
self.vops.copy_vmdk_file(dc_ref, src_vmdk_file_path,
dest_vmdk_file_path)
invoke_api.assert_called_once_with(self.session.vim,
'CopyVirtualDisk_Task',
disk_mgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dc_ref,
force=True)
self.session.wait_for_task.assert_called_once_with(task)
def test_delete_vmdk_file(self):
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
dc_ref = self.session.dc_ref
vmdk_file_path = self.session.vmdk_file
self.vops.delete_vmdk_file(vmdk_file_path, dc_ref)
invoke_api.assert_called_once_with(self.session.vim,
'DeleteVirtualDisk_Task',
disk_mgr,
name=vmdk_file_path,
datacenter=dc_ref)
self.session.wait_for_task.assert_called_once_with(task)
def test_extend_virtual_disk(self):
"""Test volumeops.extend_virtual_disk."""
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
fake_size = 5
fake_size_in_kb = fake_size * units.Mi
fake_name = 'fake_volume_0000000001'
fake_dc = mock.sentinel.datacenter
self.vops.extend_virtual_disk(fake_size,
fake_name, fake_dc)
invoke_api.assert_called_once_with(self.session.vim,
"ExtendVirtualDisk_Task",
disk_mgr,
name=fake_name,
datacenter=fake_dc,
newCapacityKb=fake_size_in_kb,
eagerZero=False)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_all_profiles(self):
profile_ids = [1, 2]
methods = ['PbmQueryProfile', 'PbmRetrieveContent']
def invoke_api_side_effect(module, method, *args, **kwargs):
self.assertEqual(self.session.pbm, module)
self.assertEqual(methods.pop(0), method)
self.assertEqual(self.session.pbm.service_content.profileManager,
args[0])
if method == 'PbmQueryProfile':
self.assertEqual('STORAGE',
kwargs['resourceType'].resourceType)
return profile_ids
self.assertEqual(profile_ids, kwargs['profileIds'])
self.session.invoke_api.side_effect = invoke_api_side_effect
self.vops.get_all_profiles()
self.assertEqual(2, self.session.invoke_api.call_count)
# Clear side effects.
self.session.invoke_api.side_effect = None
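    def _example_sequenced_side_effect(self):
        # Editor's illustrative sketch (not part of the original suite): the
        # call-ordering pattern used in test_get_all_profiles above -- a
        # single mock asserts a fixed call sequence by popping the next
        # expected method name on every invocation.
        expected_methods = ['PbmQueryProfile', 'PbmRetrieveContent']
        def side_effect(module, method, *args, **kwargs):
            # Each call must name the next expected method, in order.
            self.assertEqual(expected_methods.pop(0), method)
            return mock.sentinel.result
        return side_effect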
def test_get_all_profiles_with_no_profiles(self):
self.session.invoke_api.return_value = []
res_type = mock.sentinel.res_type
self.session.pbm.client.factory.create.return_value = res_type
profiles = self.vops.get_all_profiles()
self.session.invoke_api.assert_called_once_with(
self.session.pbm,
'PbmQueryProfile',
self.session.pbm.service_content.profileManager,
resourceType=res_type)
self.assertEqual([], profiles)
def _create_profile(self, profile_id, name):
profile = mock.Mock()
profile.profileId = profile_id
profile.name = name
return profile
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'get_all_profiles')
def test_retrieve_profile_id(self, get_all_profiles):
profiles = [self._create_profile(str(i), 'profile-%d' % i)
for i in range(0, 10)]
get_all_profiles.return_value = profiles
exp_profile_id = '5'
profile_id = self.vops.retrieve_profile_id(
'profile-%s' % exp_profile_id)
self.assertEqual(exp_profile_id, profile_id)
get_all_profiles.assert_called_once_with()
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'get_all_profiles')
def test_retrieve_profile_id_with_invalid_profile(self, get_all_profiles):
profiles = [self._create_profile(str(i), 'profile-%d' % i)
for i in range(0, 10)]
get_all_profiles.return_value = profiles
        profile_id = self.vops.retrieve_profile_id(
            'profile-%d' % len(profiles))
self.assertIsNone(profile_id)
get_all_profiles.assert_called_once_with()
def test_filter_matching_hubs(self):
hubs = mock.Mock()
profile_id = 'profile-0'
self.vops.filter_matching_hubs(hubs, profile_id)
self.session.invoke_api.assert_called_once_with(
self.session.pbm,
'PbmQueryMatchingHub',
self.session.pbm.service_content.placementSolver,
hubsToSearch=hubs,
profile=profile_id)
class VirtualDiskPathTest(test.TestCase):
"""Unit tests for VirtualDiskPath."""
def setUp(self):
super(VirtualDiskPathTest, self).setUp()
self._path = volumeops.VirtualDiskPath("nfs", "A/B/", "disk")
def test_get_datastore_file_path(self):
self.assertEqual("[nfs] A/B/disk.vmdk",
self._path.get_datastore_file_path("nfs",
"A/B/disk.vmdk"))
def test_get_descriptor_file_path(self):
self.assertEqual("A/B/disk.vmdk",
self._path.get_descriptor_file_path())
def test_get_descriptor_ds_file_path(self):
self.assertEqual("[nfs] A/B/disk.vmdk",
self._path.get_descriptor_ds_file_path())
class FlatExtentVirtualDiskPathTest(test.TestCase):
"""Unit tests for FlatExtentVirtualDiskPath."""
def setUp(self):
super(FlatExtentVirtualDiskPathTest, self).setUp()
self._path = volumeops.FlatExtentVirtualDiskPath("nfs", "A/B/", "disk")
def test_get_flat_extent_file_path(self):
self.assertEqual("A/B/disk-flat.vmdk",
self._path.get_flat_extent_file_path())
def test_get_flat_extent_ds_file_path(self):
self.assertEqual("[nfs] A/B/disk-flat.vmdk",
self._path.get_flat_extent_ds_file_path())
class VirtualDiskTypeTest(test.TestCase):
"""Unit tests for VirtualDiskType."""
def test_is_valid(self):
self.assertTrue(volumeops.VirtualDiskType.is_valid("thick"))
self.assertTrue(volumeops.VirtualDiskType.is_valid("thin"))
self.assertTrue(volumeops.VirtualDiskType.is_valid("eagerZeroedThick"))
self.assertFalse(volumeops.VirtualDiskType.is_valid("preallocated"))
def test_validate(self):
volumeops.VirtualDiskType.validate("thick")
volumeops.VirtualDiskType.validate("thin")
volumeops.VirtualDiskType.validate("eagerZeroedThick")
self.assertRaises(error_util.InvalidDiskTypeException,
volumeops.VirtualDiskType.validate,
"preallocated")
def test_get_virtual_disk_type(self):
self.assertEqual("preallocated",
volumeops.VirtualDiskType.get_virtual_disk_type(
"thick"))
self.assertEqual("thin",
volumeops.VirtualDiskType.get_virtual_disk_type(
"thin"))
self.assertEqual("eagerZeroedThick",
volumeops.VirtualDiskType.get_virtual_disk_type(
"eagerZeroedThick"))
self.assertRaises(error_util.InvalidDiskTypeException,
volumeops.VirtualDiskType.get_virtual_disk_type,
"preallocated")
class VirtualDiskAdapterTypeTest(test.TestCase):
"""Unit tests for VirtualDiskAdapterType."""
def test_is_valid(self):
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("lsiLogic"))
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("busLogic"))
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid(
"lsiLogicsas"))
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("ide"))
self.assertFalse(volumeops.VirtualDiskAdapterType.is_valid("pvscsi"))
def test_validate(self):
volumeops.VirtualDiskAdapterType.validate("lsiLogic")
volumeops.VirtualDiskAdapterType.validate("busLogic")
volumeops.VirtualDiskAdapterType.validate("lsiLogicsas")
volumeops.VirtualDiskAdapterType.validate("ide")
self.assertRaises(error_util.InvalidAdapterTypeException,
volumeops.VirtualDiskAdapterType.validate,
"pvscsi")
def test_get_adapter_type(self):
self.assertEqual("lsiLogic",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"lsiLogic"))
self.assertEqual("busLogic",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"busLogic"))
self.assertEqual("lsiLogic",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"lsiLogicsas"))
self.assertEqual("ide",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"ide"))
self.assertRaises(error_util.InvalidAdapterTypeException,
volumeops.VirtualDiskAdapterType.get_adapter_type,
"pvscsi")
class ControllerTypeTest(test.TestCase):
"""Unit tests for ControllerType."""
def test_get_controller_type(self):
self.assertEqual(volumeops.ControllerType.LSI_LOGIC,
volumeops.ControllerType.get_controller_type(
'lsiLogic'))
self.assertEqual(volumeops.ControllerType.BUS_LOGIC,
volumeops.ControllerType.get_controller_type(
'busLogic'))
self.assertEqual(volumeops.ControllerType.LSI_LOGIC_SAS,
volumeops.ControllerType.get_controller_type(
'lsiLogicsas'))
self.assertEqual(volumeops.ControllerType.IDE,
volumeops.ControllerType.get_controller_type(
'ide'))
self.assertRaises(error_util.InvalidAdapterTypeException,
volumeops.ControllerType.get_controller_type,
'invalid_type')
def test_is_scsi_controller(self):
self.assertTrue(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.LSI_LOGIC))
self.assertTrue(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.BUS_LOGIC))
self.assertTrue(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.LSI_LOGIC_SAS))
self.assertFalse(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.IDE))
|
|
"""JSON (de)serialization framework.
The framework presented here is somewhat based on `Go's "json" package`_
(especially the ``omitempty`` functionality).
.. _`Go's "json" package`: http://golang.org/pkg/encoding/json/
"""
import abc
import binascii
import logging
import OpenSSL
import six
from acme.jose import b64
from acme.jose import errors
from acme.jose import interfaces
from acme.jose import util
logger = logging.getLogger(__name__)
class Field(object):
"""JSON object field.
:class:`Field` is meant to be used together with
:class:`JSONObjectWithFields`.
``encoder`` (``decoder``) is a callable that accepts a single
parameter, i.e. a value to be encoded (decoded), and returns the
serialized (deserialized) value. In case of errors it should raise
:class:`~acme.jose.errors.SerializationError`
(:class:`~acme.jose.errors.DeserializationError`).
    Note that ``encoder`` should perform partial serialization only;
    likewise, ``decoder`` should perform partial deserialization only.
:ivar str json_name: Name of the field when encoded to JSON.
:ivar default: Default value (used when not present in JSON object).
    :ivar bool omitempty: If ``True`` and the field value is empty, then
        it will not be included in the serialized JSON object, and
        ``default`` will be used for deserialization. Otherwise, if ``False``,
        the field is considered required: its value will always be included
        in the serialized JSON object, and it must also be present when
        deserializing.
"""
__slots__ = ('json_name', 'default', 'omitempty', 'fdec', 'fenc')
def __init__(self, json_name, default=None, omitempty=False,
decoder=None, encoder=None):
# pylint: disable=too-many-arguments
self.json_name = json_name
self.default = default
self.omitempty = omitempty
self.fdec = self.default_decoder if decoder is None else decoder
self.fenc = self.default_encoder if encoder is None else encoder
@classmethod
def _empty(cls, value):
"""Is the provided value cosidered "empty" for this field?
This is useful for subclasses that might want to override the
definition of being empty, e.g. for some more exotic data types.
"""
return not isinstance(value, bool) and not value
def omit(self, value):
"""Omit the value in output?"""
return self._empty(value) and self.omitempty
def _update_params(self, **kwargs):
current = dict(json_name=self.json_name, default=self.default,
omitempty=self.omitempty,
decoder=self.fdec, encoder=self.fenc)
current.update(kwargs)
return type(self)(**current) # pylint: disable=star-args
def decoder(self, fdec):
"""Descriptor to change the decoder on JSON object field."""
return self._update_params(decoder=fdec)
def encoder(self, fenc):
"""Descriptor to change the encoder on JSON object field."""
return self._update_params(encoder=fenc)
def decode(self, value):
"""Decode a value, optionally with context JSON object."""
return self.fdec(value)
def encode(self, value):
"""Encode a value, optionally with context JSON object."""
return self.fenc(value)
@classmethod
def default_decoder(cls, value):
"""Default decoder.
Recursively deserialize into immutable types (
:class:`acme.jose.util.frozendict` instead of
:func:`dict`, :func:`tuple` instead of :func:`list`).
"""
        # base cases for different types returned by json.loads
if isinstance(value, list):
return tuple(cls.default_decoder(subvalue) for subvalue in value)
elif isinstance(value, dict):
return util.frozendict(
dict((cls.default_decoder(key), cls.default_decoder(value))
for key, value in six.iteritems(value)))
else: # integer or string
return value
@classmethod
def default_encoder(cls, value):
"""Default (passthrough) encoder."""
# field.to_partial_json() is no good as encoder has to do partial
# serialization only
return value
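# Editor's note: a minimal usage sketch, not part of the original module.
# ``_example_field`` is an illustrative name only; it demonstrates the
# ``encoder``/``decoder`` descriptors and the ``omitempty`` behaviour
# documented on :class:`Field`.
_example_field = Field('exampleField', omitempty=True)
@_example_field.encoder
def _example_field(value):  # pylint: disable=function-redefined
    """Append a marker during (partial) serialization."""
    return value + '!'
@_example_field.decoder
def _example_field(value):  # pylint: disable=function-redefined
    """Strip the marker during deserialization."""
    if not value.endswith('!'):
        raise errors.DeserializationError('missing marker')
    return value[:-1]
# _example_field.encode('foo') == 'foo!'
# _example_field.decode('foo!') == 'foo'
# _example_field.omit('') is True (empty values are dropped when serializing)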
class JSONObjectWithFieldsMeta(abc.ABCMeta):
"""Metaclass for :class:`JSONObjectWithFields` and its subclasses.
It makes sure that, for any class ``cls`` with ``__metaclass__``
set to ``JSONObjectWithFieldsMeta``:
1. All fields (attributes of type :class:`Field`) in the class
definition are moved to the ``cls._fields`` dictionary, where
keys are field attribute names and values are fields themselves.
2. ``cls.__slots__`` is extended by all field attribute names
(i.e. not :attr:`Field.json_name`). Original ``cls.__slots__``
are stored in ``cls._orig_slots``.
    As a consequence, for a field attribute name ``some_field``,
``cls.some_field`` will be a slot descriptor and not an instance
of :class:`Field`. For example::
some_field = Field('someField', default=())
class Foo(object):
__metaclass__ = JSONObjectWithFieldsMeta
__slots__ = ('baz',)
some_field = some_field
        assert Foo.__slots__ == ('baz', 'some_field')
        assert Foo._orig_slots == ('baz',)
assert Foo.some_field is not Field
assert Foo._fields.keys() == ['some_field']
assert Foo._fields['some_field'] is some_field
As an implementation note, this metaclass inherits from
:class:`abc.ABCMeta` (and not the usual :class:`type`) to mitigate
the metaclass conflict (:class:`ImmutableMap` and
:class:`JSONDeSerializable`, parents of :class:`JSONObjectWithFields`,
    use :class:`abc.ABCMeta` as their metaclass).
"""
def __new__(mcs, name, bases, dikt):
fields = {}
for base in bases:
fields.update(getattr(base, '_fields', {}))
# Do not reorder, this class might override fields from base classes!
for key, value in tuple(six.iteritems(dikt)):
# not six.iterkeys() (in-place edit!)
if isinstance(value, Field):
fields[key] = dikt.pop(key)
dikt['_orig_slots'] = dikt.get('__slots__', ())
dikt['__slots__'] = tuple(
list(dikt['_orig_slots']) + list(six.iterkeys(fields)))
dikt['_fields'] = fields
return abc.ABCMeta.__new__(mcs, name, bases, dikt)
@six.add_metaclass(JSONObjectWithFieldsMeta)
class JSONObjectWithFields(util.ImmutableMap, interfaces.JSONDeSerializable):
# pylint: disable=too-few-public-methods
"""JSON object with fields.
Example::
class Foo(JSONObjectWithFields):
bar = Field('Bar')
empty = Field('Empty', omitempty=True)
@bar.encoder
def bar(value):
return value + 'bar'
@bar.decoder
def bar(value):
if not value.endswith('bar'):
raise errors.DeserializationError('No bar suffix!')
return value[:-3]
assert Foo(bar='baz').to_partial_json() == {'Bar': 'bazbar'}
assert Foo.from_json({'Bar': 'bazbar'}) == Foo(bar='baz')
assert (Foo.from_json({'Bar': 'bazbar', 'Empty': '!'})
== Foo(bar='baz', empty='!'))
assert Foo(bar='baz').bar == 'baz'
"""
@classmethod
def _defaults(cls):
"""Get default fields values."""
return dict([(slot, field.default) for slot, field
in six.iteritems(cls._fields)])
def __init__(self, **kwargs):
# pylint: disable=star-args
super(JSONObjectWithFields, self).__init__(
**(dict(self._defaults(), **kwargs)))
def encode(self, name):
"""Encode a single field.
:param str name: Name of the field to be encoded.
        :raises errors.SerializationError: if field cannot be serialized
:raises errors.Error: if field could not be found
"""
try:
field = self._fields[name]
except KeyError:
raise errors.Error("Field not found: {0}".format(name))
return field.encode(getattr(self, name))
def fields_to_partial_json(self):
"""Serialize fields to JSON."""
jobj = {}
omitted = set()
for slot, field in six.iteritems(self._fields):
value = getattr(self, slot)
if field.omit(value):
omitted.add((slot, value))
else:
try:
jobj[field.json_name] = field.encode(value)
except errors.SerializationError as error:
raise errors.SerializationError(
'Could not encode {0} ({1}): {2}'.format(
slot, value, error))
if omitted:
# pylint: disable=star-args
logger.debug('Omitted empty fields: %s', ', '.join(
'{0!s}={1!r}'.format(*field) for field in omitted))
return jobj
def to_partial_json(self):
return self.fields_to_partial_json()
@classmethod
def _check_required(cls, jobj):
missing = set()
for _, field in six.iteritems(cls._fields):
if not field.omitempty and field.json_name not in jobj:
missing.add(field.json_name)
if missing:
raise errors.DeserializationError(
                'The following fields are required: {0}'.format(
','.join(missing)))
@classmethod
def fields_from_json(cls, jobj):
"""Deserialize fields from JSON."""
cls._check_required(jobj)
fields = {}
for slot, field in six.iteritems(cls._fields):
if field.json_name not in jobj and field.omitempty:
fields[slot] = field.default
else:
value = jobj[field.json_name]
try:
fields[slot] = field.decode(value)
except errors.DeserializationError as error:
raise errors.DeserializationError(
'Could not decode {0!r} ({1!r}): {2}'.format(
slot, value, error))
return fields
@classmethod
def from_json(cls, jobj):
return cls(**cls.fields_from_json(jobj))
def encode_b64jose(data):
"""Encode JOSE Base-64 field.
:param bytes data:
:rtype: `unicode`
"""
# b64encode produces ASCII characters only
return b64.b64encode(data).decode('ascii')
def decode_b64jose(data, size=None, minimum=False):
"""Decode JOSE Base-64 field.
:param unicode data:
:param int size: Required length (after decoding).
:param bool minimum: If ``True``, then `size` will be treated as
minimum required length, as opposed to exact equality.
:rtype: bytes
"""
error_cls = TypeError if six.PY2 else binascii.Error
try:
decoded = b64.b64decode(data.encode())
except error_cls as error:
raise errors.DeserializationError(error)
if size is not None and ((not minimum and len(decoded) != size) or
(minimum and len(decoded) < size)):
raise errors.DeserializationError(
"Expected at least or exactly {0} bytes".format(size))
return decoded
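# Editor's note: a brief, self-contained round-trip sketch, not part of the
# original module; ``_B64_EXAMPLE`` is an illustrative name only.
_B64_EXAMPLE = b'\x00\x01\x02'
# encode_b64jose() yields JOSE (URL-safe, unpadded) Base64 text, and
# decode_b64jose() restores the original bytes, optionally enforcing an
# exact or minimum decoded size.
assert decode_b64jose(encode_b64jose(_B64_EXAMPLE)) == _B64_EXAMPLE
assert decode_b64jose(encode_b64jose(_B64_EXAMPLE), size=3) == _B64_EXAMPLE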
def encode_hex16(value):
"""Hexlify.
:param bytes value:
:rtype: unicode
"""
return binascii.hexlify(value).decode()
def decode_hex16(value, size=None, minimum=False):
"""Decode hexlified field.
:param unicode value:
:param int size: Required length (after decoding).
:param bool minimum: If ``True``, then `size` will be treated as
minimum required length, as opposed to exact equality.
:rtype: bytes
"""
value = value.encode()
if size is not None and ((not minimum and len(value) != size * 2) or
(minimum and len(value) < size * 2)):
raise errors.DeserializationError()
error_cls = TypeError if six.PY2 else binascii.Error
try:
return binascii.unhexlify(value)
except error_cls as error:
raise errors.DeserializationError(error)
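# Editor's note: a matching sketch for the hex helpers, not part of the
# original module; ``_HEX_EXAMPLE`` is an illustrative name only.
_HEX_EXAMPLE = b'\xde\xad\xbe\xef'
assert encode_hex16(_HEX_EXAMPLE) == u'deadbeef'
assert decode_hex16(u'deadbeef', size=4) == _HEX_EXAMPLE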
def encode_cert(cert):
"""Encode certificate as JOSE Base-64 DER.
:type cert: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
:rtype: unicode
"""
return encode_b64jose(OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1, cert))
def decode_cert(b64der):
"""Decode JOSE Base-64 DER-encoded certificate.
:param unicode b64der:
:rtype: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
"""
try:
return util.ComparableX509(OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
except OpenSSL.crypto.Error as error:
raise errors.DeserializationError(error)
def encode_csr(csr):
"""Encode CSR as JOSE Base-64 DER.
:type csr: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
:rtype: unicode
"""
return encode_b64jose(OpenSSL.crypto.dump_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr))
def decode_csr(b64der):
"""Decode JOSE Base-64 DER-encoded CSR.
:param unicode b64der:
:rtype: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
"""
try:
return util.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
except OpenSSL.crypto.Error as error:
raise errors.DeserializationError(error)
class TypedJSONObjectWithFields(JSONObjectWithFields):
"""JSON object with type."""
typ = NotImplemented
"""Type of the object. Subclasses must override."""
type_field_name = "type"
"""Field name used to distinguish different object types.
Subclasses will probably have to override this.
"""
TYPES = NotImplemented
"""Types registered for JSON deserialization"""
@classmethod
def register(cls, type_cls, typ=None):
"""Register class for JSON deserialization."""
typ = type_cls.typ if typ is None else typ
cls.TYPES[typ] = type_cls
return type_cls
@classmethod
def get_type_cls(cls, jobj):
"""Get the registered class for ``jobj``."""
if cls in six.itervalues(cls.TYPES):
if cls.type_field_name not in jobj:
raise errors.DeserializationError(
"Missing type field ({0})".format(cls.type_field_name))
# cls is already registered type_cls, force to use it
            # so that, e.g. Revocation.from_json(jobj) fails if
# jobj["type"] != "revocation".
return cls
if not isinstance(jobj, dict):
raise errors.DeserializationError(
"{0} is not a dictionary object".format(jobj))
try:
typ = jobj[cls.type_field_name]
except KeyError:
raise errors.DeserializationError("missing type field")
try:
return cls.TYPES[typ]
except KeyError:
raise errors.UnrecognizedTypeError(typ, jobj)
def to_partial_json(self):
"""Get JSON serializable object.
:returns: Serializable JSON object representing ACME typed object.
:meth:`validate` will almost certainly not work, due to reasons
explained in :class:`acme.interfaces.IJSONSerializable`.
:rtype: dict
"""
jobj = self.fields_to_partial_json()
jobj[self.type_field_name] = self.typ
return jobj
@classmethod
def from_json(cls, jobj):
"""Deserialize ACME object from valid JSON object.
:raises acme.errors.UnrecognizedTypeError: if type
of the ACME object has not been registered.
"""
# make sure subclasses don't cause infinite recursive from_json calls
type_cls = cls.get_type_cls(jobj)
return type_cls(**type_cls.fields_from_json(jobj))
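# Editor's note: a minimal sketch, not part of the original module, showing
# how typed objects are registered and dispatched through ``from_json``.
# All names below are illustrative only.
class _ExampleTyped(TypedJSONObjectWithFields):
    """Illustrative base class carrying its own type registry."""
    TYPES = {}
@_ExampleTyped.register
class _ExamplePing(_ExampleTyped):
    """Illustrative registered subclass."""
    typ = 'ping'
    payload = Field('payload', omitempty=True)
# ``from_json`` dispatches on the default "type" field to the registered
# subclass, and ``to_partial_json`` adds that field back on serialization:
assert isinstance(
    _ExampleTyped.from_json({'type': 'ping', 'payload': 'x'}), _ExamplePing)
assert _ExamplePing(payload='x').to_partial_json() == {
    'type': 'ping', 'payload': 'x'}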
|
|
# -*- coding: utf-8 -*-
import pytest
import pytz
import ioex.datetimeex
import datetime
@pytest.mark.parametrize(('start', 'end'), [
[datetime.datetime(2016, 7, 24, 12, 21),
datetime.datetime(2016, 7, 24, 12, 22)],
[None, datetime.datetime(2016, 7, 24, 12, 22)],
[datetime.datetime(2016, 7, 24, 12, 21), None],
[None, None],
])
def test_init_start_end(start, end):
p = ioex.datetimeex.Period(start=start, end=end)
assert p.start == start
assert p.end == end
@pytest.mark.parametrize(('start', 'end'), [
[';-)', datetime.datetime(2016, 7, 24, 12, 22)],
[datetime.datetime(2016, 7, 24, 12, 22), ';-)'],
])
def test_init_start_end_fail(start, end):
with pytest.raises(TypeError):
ioex.datetimeex.Period(start=start, end=end)
@pytest.mark.parametrize(('start'), [
datetime.datetime(2016, 7, 24, 12, 21),
None,
])
def test_set_start(start):
p = ioex.datetimeex.Period()
assert p.start is None
p.start = start
assert p.start == start
@pytest.mark.parametrize(('start'), [
':-/',
])
def test_set_start_fail(start):
p = ioex.datetimeex.Period()
with pytest.raises(TypeError):
p.start = start
@pytest.mark.parametrize(('end'), [
datetime.datetime(2016, 7, 24, 12, 21),
None,
])
def test_set_end(end):
p = ioex.datetimeex.Period()
assert p.end is None
p.end = end
assert p.end == end
@pytest.mark.parametrize(('end'), [
':-/',
])
def test_set_end_fail(end):
p = ioex.datetimeex.Period()
with pytest.raises(TypeError):
p.end = end
@pytest.mark.parametrize(('start', 'end', 'iso'), [
[
datetime.datetime(2016, 7, 24, 12, 21, 0),
datetime.datetime(2016, 7, 24, 12, 22, 13),
'2016-07-24T12:21:00/2016-07-24T12:22:13',
],
[
datetime.datetime(2016, 7, 24, 12, 21, 0, tzinfo=pytz.utc),
datetime.datetime(2016, 7, 24, 12, 22, 13, tzinfo=pytz.utc),
'2016-07-24T12:21:00Z/2016-07-24T12:22:13Z',
],
[
datetime.datetime(2016, 7, 24, 12, 21, 0, tzinfo=pytz.utc),
pytz.timezone('Europe/Vienna').localize(
datetime.datetime(2016, 7, 24, 12, 22, 13)
),
'2016-07-24T12:21:00Z/2016-07-24T12:22:13+02:00',
],
[
pytz.timezone('US/Pacific').localize(
datetime.datetime(2016, 1, 12, 12, 22, 13)
),
pytz.timezone('Europe/London').localize(
datetime.datetime(2016, 1, 24, 12, 22, 13)
),
'2016-01-12T12:22:13-08:00/2016-01-24T12:22:13Z',
],
[
datetime.datetime(2016, 7, 24, 12, 20, 0, microsecond=25500),
datetime.datetime(
2016, 7, 24, 12, 21, 0,
microsecond=13,
tzinfo=pytz.utc,
),
'2016-07-24T12:20:00.025500/2016-07-24T12:21:00.000013Z',
],
])
def test_get_isoformat(start, end, iso):
p = ioex.datetimeex.Period(start=start, end=end)
assert p.isoformat == iso
@pytest.mark.parametrize(('start', 'end', 'source_iso'), [
[
datetime.datetime(2016, 7, 24, 12, 21, 0),
datetime.datetime(2016, 7, 24, 12, 22, 13),
'2016-07-24T12:21:00/2016-07-24T12:22:13',
],
[
datetime.datetime(2016, 7, 24, 12, 21, 0, tzinfo=pytz.utc),
datetime.datetime(2016, 7, 24, 12, 22, 13, tzinfo=pytz.utc),
'2016-07-24T12:21:00Z/2016-07-24T12:22:13Z',
],
[
datetime.datetime(2016, 7, 24, 12, 21, 0, tzinfo=pytz.utc),
pytz.timezone('Europe/Vienna').localize(
datetime.datetime(2016, 7, 24, 12, 22, 13)
),
'2016-07-24T12:21:00Z/2016-07-24T12:22:13+02:00',
],
[
pytz.timezone('US/Pacific').localize(
datetime.datetime(2016, 1, 12, 12, 22, 13)
),
pytz.timezone('Europe/London').localize(
datetime.datetime(2016, 1, 24, 12, 22, 13)
),
'2016-01-12T12:22:13-08:00/2016-01-24T12:22:13Z',
],
[
datetime.datetime(2016, 7, 24, 12, 20, 0, microsecond=25500),
datetime.datetime(
2016, 7, 24, 12, 21, 0,
microsecond=13,
tzinfo=pytz.utc,
),
'2016-07-24T12:20:00.025500/2016-07-24T12:21:00.000013Z',
],
[
datetime.datetime(2016, 7, 24, 12, 20, 0, microsecond=25500),
datetime.datetime(
2016, 7, 24, 12, 21, 0,
microsecond=130000,
tzinfo=pytz.utc,
),
'2016-07-24T12:20:00.0255/2016-07-24T12:21:00.13Z',
],
])
def test_from_iso(start, end, source_iso):
p = ioex.datetimeex.Period.from_iso(source_iso)
assert p.start == start
assert p.end == end
@pytest.mark.parametrize(('source_iso'), [
'2016-07-24T12:20:0<INVALID>0.0255/2016-07-24T12:21:00.13Z',
])
def test_from_iso_fail(source_iso):
with pytest.raises(ValueError):
ioex.datetimeex.Period.from_iso(source_iso)
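# Editor's note: an illustrative round trip, not part of the original test
# module, summarising the behaviour exercised above -- Period.isoformat
# renders an ISO 8601 interval and Period.from_iso parses one back.
def test_editor_example_iso_round_trip():
    period = ioex.datetimeex.Period(
        start=datetime.datetime(2016, 7, 24, 12, 21, 0, tzinfo=pytz.utc),
        end=datetime.datetime(2016, 7, 24, 12, 22, 13, tzinfo=pytz.utc),
    )
    assert period.isoformat == '2016-07-24T12:21:00Z/2016-07-24T12:22:13Z'
    assert ioex.datetimeex.Period.from_iso(period.isoformat) == period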
@pytest.mark.parametrize(('a', 'b'), [
[
ioex.datetimeex.Period(
start=datetime.datetime(2016, 7, 24, 12, 21, 0),
end=datetime.datetime(2016, 7, 24, 12, 22, 13),
),
ioex.datetimeex.Period(
start=datetime.datetime(2016, 7, 24, 12, 21, 0, 0),
end=datetime.datetime(2016, 7, 24, 12, 22, 13, 0),
),
],
[
ioex.datetimeex.Period(
start=pytz.timezone('Europe/Vienna').localize(
datetime.datetime(2016, 7, 24, 12, 21, 0)
),
end=datetime.datetime(2016, 7, 24, 12, 22, 13),
),
ioex.datetimeex.Period(
start=pytz.timezone('Europe/Vienna').localize(
datetime.datetime(2016, 7, 24, 12, 21, 0)
),
end=datetime.datetime(2016, 7, 24, 12, 22, 13),
),
],
[
ioex.datetimeex.Period(
start=pytz.timezone('Europe/Vienna').localize(
datetime.datetime(2016, 7, 24, 12, 21, 0)
),
end=pytz.timezone('Europe/London').localize(
datetime.datetime(2016, 7, 24, 12, 22, 13)
),
),
ioex.datetimeex.Period(
start=pytz.timezone('Europe/London').localize(
datetime.datetime(2016, 7, 24, 11, 21, 0)
),
end=pytz.timezone('Europe/Vienna').localize(
datetime.datetime(2016, 7, 24, 13, 22, 13)
),
),
],
])
def test_eq(a, b):
assert a == b
assert b == a
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from unittest import TestCase, main
from collections import defaultdict
import numpy as np
import numpy.testing as npt
from scipy.stats import pearsonr
from skbio import DistanceMatrix, TreeNode
from skbio.tree import (DuplicateNodeError, NoLengthError,
TreeError, MissingNodeError, NoParentError)
from skbio.util import RepresentationWarning
class TreeTests(TestCase):
def setUp(self):
"""Prep the self"""
self.simple_t = TreeNode.read(io.StringIO("((a,b)i1,(c,d)i2)root;"))
nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])
nodes['a'].append(nodes['b'])
nodes['b'].append(nodes['c'])
nodes['c'].append(nodes['d'])
nodes['c'].append(nodes['e'])
nodes['c'].append(nodes['f'])
nodes['f'].append(nodes['g'])
nodes['a'].append(nodes['h'])
self.TreeNode = nodes
self.TreeRoot = nodes['a']
def rev_f(items):
items.reverse()
def rotate_f(items):
tmp = items[-1]
items[1:] = items[:-1]
items[0] = tmp
self.rev_f = rev_f
self.rotate_f = rotate_f
self.complex_tree = TreeNode.read(io.StringIO(
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);"))
def test_observed_node_counts(self):
"""returns observed nodes counts given vector of otu observation counts
"""
# no OTUs observed
otu_counts = {}
expected = defaultdict(int)
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# error on zero count(s)
otu_counts = {'a': 0}
self.assertRaises(ValueError, self.simple_t.observed_node_counts,
otu_counts)
otu_counts = {'a': 0, 'b': 0, 'c': 0, 'd': 0}
self.assertRaises(ValueError, self.simple_t.observed_node_counts,
otu_counts)
# all OTUs observed once
otu_counts = {'a': 1, 'b': 1, 'c': 1, 'd': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 4
expected[self.simple_t.find('i1')] = 2
expected[self.simple_t.find('i2')] = 2
expected[self.simple_t.find('a')] = 1
expected[self.simple_t.find('b')] = 1
expected[self.simple_t.find('c')] = 1
expected[self.simple_t.find('d')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# some OTUs observed twice
otu_counts = {'a': 2, 'b': 1, 'c': 1, 'd': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 5
expected[self.simple_t.find('i1')] = 3
expected[self.simple_t.find('i2')] = 2
expected[self.simple_t.find('a')] = 2
expected[self.simple_t.find('b')] = 1
expected[self.simple_t.find('c')] = 1
expected[self.simple_t.find('d')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
otu_counts = {'a': 2, 'b': 1, 'c': 1, 'd': 2}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 6
expected[self.simple_t.find('i1')] = 3
expected[self.simple_t.find('i2')] = 3
expected[self.simple_t.find('a')] = 2
expected[self.simple_t.find('b')] = 1
expected[self.simple_t.find('c')] = 1
expected[self.simple_t.find('d')] = 2
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# some OTUs observed, others not observed
otu_counts = {'a': 2, 'b': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 3
expected[self.simple_t.find('i1')] = 3
expected[self.simple_t.find('a')] = 2
expected[self.simple_t.find('b')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
otu_counts = {'d': 1}
expected = defaultdict(int)
expected[self.simple_t.find('root')] = 1
expected[self.simple_t.find('i2')] = 1
expected[self.simple_t.find('d')] = 1
self.assertEqual(self.simple_t.observed_node_counts(otu_counts),
expected)
# error on non-tips
otu_counts = {'a': 2, 'e': 1}
self.assertRaises(MissingNodeError, self.simple_t.observed_node_counts,
otu_counts)
otu_counts = {'a': 2, 'i1': 1}
self.assertRaises(MissingNodeError, self.simple_t.observed_node_counts,
otu_counts)
# test with another tree
otu_counts = {}
expected = defaultdict(int)
self.assertEqual(self.complex_tree.observed_node_counts(otu_counts),
expected)
otu_counts = {'e': 42, 'f': 1}
expected[self.complex_tree.root()] = 43
expected[self.complex_tree.find('int5')] = 43
expected[self.complex_tree.find('e')] = 42
expected[self.complex_tree.find('f')] = 1
self.assertEqual(self.complex_tree.observed_node_counts(otu_counts),
expected)
def test_count(self):
"""Get node counts"""
exp = 7
obs = self.simple_t.count()
self.assertEqual(obs, exp)
exp = 4
obs = self.simple_t.count(tips=True)
self.assertEqual(obs, exp)
def test_copy(self):
"""copy a tree"""
self.simple_t.children[0].length = 1.2
self.simple_t.children[1].children[0].length = 0.5
cp = self.simple_t.copy()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
def test_append(self):
"""Append a node to a tree"""
second_tree = TreeNode.read(io.StringIO("(x,y)z;"))
self.simple_t.append(second_tree)
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z')
self.assertEqual(len(self.simple_t.children), 3)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y')
self.assertEqual(second_tree.parent, self.simple_t)
def test_extend(self):
"""Extend a few nodes"""
second_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
third_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
first_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
fourth_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
self.simple_t.extend([second_tree, third_tree])
first_tree.extend(fourth_tree.children)
self.assertEqual(0, len(fourth_tree.children))
self.assertEqual(first_tree.children[0].name, 'x1')
self.assertEqual(first_tree.children[1].name, 'y1')
self.assertEqual(first_tree.children[2].name, 'x2')
self.assertEqual(first_tree.children[3].name, 'y2')
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(self.simple_t.children[2].name, 'z1')
self.assertEqual(self.simple_t.children[3].name, 'z2')
self.assertEqual(len(self.simple_t.children), 4)
self.assertEqual(self.simple_t.children[2].children[0].name, 'x1')
self.assertEqual(self.simple_t.children[2].children[1].name, 'y1')
self.assertEqual(self.simple_t.children[3].children[0].name, 'x2')
self.assertEqual(self.simple_t.children[3].children[1].name, 'y2')
self.assertIs(second_tree.parent, self.simple_t)
self.assertIs(third_tree.parent, self.simple_t)
def test_extend_empty(self):
"""Extend on the empty case should work"""
self.simple_t.extend([])
self.assertEqual(self.simple_t.children[0].name, 'i1')
self.assertEqual(self.simple_t.children[1].name, 'i2')
self.assertEqual(len(self.simple_t.children), 2)
def test_iter(self):
"""iter wraps children"""
exp = ['i1', 'i2']
obs = [n.name for n in self.simple_t]
self.assertEqual(obs, exp)
def test_gops(self):
"""Basic TreeNode operations should work as expected"""
p = TreeNode()
self.assertEqual(str(p), ';\n')
p.name = 'abc'
self.assertEqual(str(p), 'abc;\n')
p.length = 3
self.assertEqual(str(p), 'abc:3;\n') # don't suppress branch from root
q = TreeNode()
p.append(q)
self.assertEqual(str(p), '()abc:3;\n')
r = TreeNode()
q.append(r)
self.assertEqual(str(p), '(())abc:3;\n')
r.name = 'xyz'
self.assertEqual(str(p), '((xyz))abc:3;\n')
q.length = 2
self.assertEqual(str(p), '((xyz):2)abc:3;\n')
def test_pop(self):
"""Pop off a node"""
second_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
third_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
self.simple_t.extend([second_tree, third_tree])
i1 = self.simple_t.pop(0)
z2 = self.simple_t.pop()
self.assertEqual(i1.name, 'i1')
self.assertEqual(z2.name, 'z2')
self.assertEqual(i1.children[0].name, 'a')
self.assertEqual(i1.children[1].name, 'b')
self.assertEqual(z2.children[0].name, 'x2')
self.assertEqual(z2.children[1].name, 'y2')
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'z1')
self.assertEqual(len(self.simple_t.children), 2)
def test_remove(self):
"""Remove nodes"""
self.assertTrue(self.simple_t.remove(self.simple_t.children[0]))
self.assertEqual(len(self.simple_t.children), 1)
n = TreeNode()
self.assertFalse(self.simple_t.remove(n))
def test_remove_deleted(self):
"""Remove nodes by function"""
def f(node):
return node.name in ['b', 'd']
self.simple_t.remove_deleted(f)
exp = "((a)i1,(c)i2)root;\n"
obs = str(self.simple_t)
self.assertEqual(obs, exp)
def test_adopt(self):
"""Adopt a node!"""
n1 = TreeNode(name='n1')
n2 = TreeNode(name='n2')
n3 = TreeNode(name='n3')
self.simple_t._adopt(n1)
self.simple_t.children[-1]._adopt(n2)
n2._adopt(n3)
# adopt doesn't update .children
self.assertEqual(len(self.simple_t.children), 2)
self.assertIs(n1.parent, self.simple_t)
self.assertIs(n2.parent, self.simple_t.children[-1])
self.assertIs(n3.parent, n2)
def test_remove_node(self):
"""Remove a node by index"""
n = self.simple_t._remove_node(-1)
self.assertEqual(n.parent, None)
self.assertEqual(len(self.simple_t.children), 1)
self.assertEqual(len(n.children), 2)
self.assertNotIn(n, self.simple_t.children)
def test_prune(self):
"""Collapse single descendent nodes"""
# check the identity case
cp = self.simple_t.copy()
self.simple_t.prune()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
# create a single descendent by removing tip 'a'
n = self.simple_t.children[0]
n.remove(n.children[0])
self.simple_t.prune()
self.assertEqual(len(self.simple_t.children), 2)
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'b')
def test_prune_length(self):
"""Collapse single descendent nodes"""
# check the identity case
cp = self.simple_t.copy()
self.simple_t.prune()
gen = zip(cp.traverse(include_self=True),
self.simple_t.traverse(include_self=True))
for a, b in gen:
self.assertIsNot(a, b)
self.assertEqual(a.name, b.name)
self.assertEqual(a.length, b.length)
for n in self.simple_t.traverse():
n.length = 1.0
# create a single descendent by removing tip 'a'
n = self.simple_t.children[0]
n.remove(n.children[0])
self.simple_t.prune()
self.assertEqual(len(self.simple_t.children), 2)
self.assertEqual(self.simple_t.children[0].name, 'i2')
self.assertEqual(self.simple_t.children[1].name, 'b')
self.assertEqual(self.simple_t.children[1].length, 2.0)
def test_subset(self):
"""subset should return set of leaves that descends from node"""
t = self.simple_t
self.assertEqual(t.subset(), frozenset('abcd'))
c = t.children[0]
self.assertEqual(c.subset(), frozenset('ab'))
leaf = c.children[1]
self.assertEqual(leaf.subset(), frozenset(''))
def test_subsets(self):
"""subsets should return all subsets descending from a set"""
t = self.simple_t
self.assertEqual(t.subsets(), frozenset(
[frozenset('ab'), frozenset('cd')]))
def test_is_tip(self):
"""see if we're a tip or not"""
self.assertFalse(self.simple_t.is_tip())
self.assertFalse(self.simple_t.children[0].is_tip())
self.assertTrue(self.simple_t.children[0].children[0].is_tip())
def test_is_root(self):
"""see if we're at the root or not"""
self.assertTrue(self.simple_t.is_root())
self.assertFalse(self.simple_t.children[0].is_root())
self.assertFalse(self.simple_t.children[0].children[0].is_root())
def test_root(self):
"""Get the root!"""
root = self.simple_t
self.assertIs(root, self.simple_t.root())
self.assertIs(root, self.simple_t.children[0].root())
self.assertIs(root, self.simple_t.children[1].children[1].root())
def test_invalidate_lookup_caches(self):
root = self.simple_t
root.create_caches()
self.assertNotEqual(root._tip_cache, {})
self.assertNotEqual(root._non_tip_cache, {})
root.invalidate_caches()
self.assertEqual(root._tip_cache, {})
self.assertEqual(root._non_tip_cache, {})
def test_invalidate_attr_caches(self):
tree = TreeNode.read(io.StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
def f(n):
return [n.name] if n.is_tip() else []
tree.cache_attr(f, 'tip_names')
tree.invalidate_caches()
for n in tree.traverse(include_self=True):
self.assertFalse(hasattr(n, 'tip_names'))
def test_create_caches_duplicate_tip_names(self):
with self.assertRaises(DuplicateNodeError):
TreeNode.read(io.StringIO('(a, a);')).create_caches()
def test_find_all(self):
t = TreeNode.read(io.StringIO("((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;"))
exp = [t.children[0],
t.children[1].children[0],
t.children[1],
t.children[2].children[1]]
obs = t.find_all('c')
self.assertEqual(obs, exp)
identity = t.find_all(t)
self.assertEqual(len(identity), 1)
self.assertEqual(identity[0], t)
identity_name = t.find_all('root')
self.assertEqual(len(identity_name), 1)
self.assertEqual(identity_name[0], t)
exp = [t.children[2],
t.children[0].children[0]]
obs = t.find_all('a')
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t.find_all('missing')
def test_find(self):
"""Find a node in a tree"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
exp = t.children[0]
obs = t.find('c')
self.assertEqual(obs, exp)
exp = t.children[0].children[1]
obs = t.find('b')
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t.find('does not exist')
def test_find_cache_bug(self):
"""First implementation did not force the cache to be at the root"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f,(g,h)f);"))
exp_tip_cache_keys = set(['a', 'b', 'd', 'e', 'g', 'h'])
exp_non_tip_cache_keys = set(['c', 'f'])
tip_a = t.children[0].children[0]
tip_a.create_caches()
self.assertEqual(tip_a._tip_cache, {})
self.assertEqual(set(t._tip_cache), exp_tip_cache_keys)
self.assertEqual(set(t._non_tip_cache), exp_non_tip_cache_keys)
self.assertEqual(t._non_tip_cache['f'], [t.children[1], t.children[2]])
def test_find_by_id(self):
"""Find a node by id"""
t1 = TreeNode.read(io.StringIO("((,),(,,));"))
t2 = TreeNode.read(io.StringIO("((,),(,,));"))
exp = t1.children[1]
obs = t1.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
exp = t2.children[1]
obs = t2.find_by_id(6) # right inner node with 3 children
self.assertEqual(obs, exp)
with self.assertRaises(MissingNodeError):
t1.find_by_id(100)
def test_find_by_func(self):
"""Find nodes by a function"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
def func(x):
return x.parent == t.find('c')
exp = ['a', 'b']
obs = [n.name for n in t.find_by_func(func)]
self.assertEqual(obs, exp)
def test_ancestors(self):
"""Get all the ancestors"""
exp = ['i1', 'root']
obs = self.simple_t.children[0].children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = ['root']
obs = self.simple_t.children[0].ancestors()
self.assertEqual([o.name for o in obs], exp)
exp = []
obs = self.simple_t.ancestors()
self.assertEqual([o.name for o in obs], exp)
def test_siblings(self):
"""Get the siblings"""
exp = []
obs = self.simple_t.siblings()
self.assertEqual(obs, exp)
exp = ['i2']
obs = self.simple_t.children[0].siblings()
self.assertEqual([o.name for o in obs], exp)
exp = ['c']
obs = self.simple_t.children[1].children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
self.simple_t.append(TreeNode(name="foo"))
self.simple_t.append(TreeNode(name="bar"))
exp = ['i1', 'foo', 'bar']
obs = self.simple_t.children[1].siblings()
self.assertEqual([o.name for o in obs], exp)
def test_ascii_art(self):
"""Make some ascii trees"""
# unlabeled internal node
tr = TreeNode.read(io.StringIO("(B:0.2,(C:0.3,D:0.4):0.6)F;"))
obs = tr.ascii_art(show_internal=True, compact=False)
exp = " /-B\n-F-------|\n | /-C\n "\
" \\--------|\n \\-D"
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=True, compact=True)
exp = "-F------- /-B\n \-------- /-C\n \-D"
self.assertEqual(obs, exp)
obs = tr.ascii_art(show_internal=False, compact=False)
exp = " /-B\n---------|\n | /-C\n "\
" \\--------|\n \\-D"
self.assertEqual(obs, exp)
def test_ascii_art_three_children(self):
obs = TreeNode.read(io.StringIO('(a,(b,c,d));')).ascii_art()
self.assertEqual(obs, exp_ascii_art_three_children)
def test_accumulate_to_ancestor(self):
"""Get the distance from a node to its ancestor"""
t = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
a = t.find('a')
b = t.find('b')
exp_to_root = 0.1 + 0.3
obs_to_root = a.accumulate_to_ancestor(t)
self.assertEqual(obs_to_root, exp_to_root)
with self.assertRaises(NoParentError):
a.accumulate_to_ancestor(b)
def test_distance(self):
"""Get the distance between two nodes"""
t = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
tips = sorted([n for n in t.tips()], key=lambda x: x.name)
npt.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
npt.assert_almost_equal(tips[0].distance(tips[1]), 0.3)
npt.assert_almost_equal(tips[0].distance(tips[2]), 1.3)
with self.assertRaises(NoLengthError):
tips[0].distance(tips[3])
npt.assert_almost_equal(tips[1].distance(tips[0]), 0.3)
npt.assert_almost_equal(tips[1].distance(tips[1]), 0.0)
npt.assert_almost_equal(tips[1].distance(tips[2]), 1.4)
with self.assertRaises(NoLengthError):
tips[1].distance(tips[3])
self.assertEqual(tips[2].distance(tips[0]), 1.3)
self.assertEqual(tips[2].distance(tips[1]), 1.4)
self.assertEqual(tips[2].distance(tips[2]), 0.0)
with self.assertRaises(NoLengthError):
tips[2].distance(tips[3])
def test_lowest_common_ancestor(self):
"""TreeNode lowestCommonAncestor should return LCA for set of tips"""
t1 = TreeNode.read(io.StringIO("((a,(b,c)d)e,f,(g,h)i)j;"))
t2 = t1.copy()
t3 = t1.copy()
t4 = t1.copy()
input1 = ['a'] # return self
input2 = ['a', 'b'] # return e
input3 = ['b', 'c'] # return d
input4 = ['a', 'h', 'g'] # return j
exp1 = t1.find('a')
exp2 = t2.find('e')
exp3 = t3.find('d')
exp4 = t4
obs1 = t1.lowest_common_ancestor(input1)
obs2 = t2.lowest_common_ancestor(input2)
obs3 = t3.lowest_common_ancestor(input3)
obs4 = t4.lowest_common_ancestor(input4)
self.assertEqual(obs1, exp1)
self.assertEqual(obs2, exp2)
self.assertEqual(obs3, exp3)
self.assertEqual(obs4, exp4)
# verify multiple calls work
t_mul = t1.copy()
exp_1 = t_mul.find('d')
exp_2 = t_mul.find('i')
obs_1 = t_mul.lowest_common_ancestor(['b', 'c'])
obs_2 = t_mul.lowest_common_ancestor(['g', 'h'])
self.assertEqual(obs_1, exp_1)
self.assertEqual(obs_2, exp_2)
# empty case
with self.assertRaises(ValueError):
t1.lowest_common_ancestor([])
def test_get_max_distance(self):
"""get_max_distance should get max tip distance across tree"""
tree = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
dist, nodes = tree.get_max_distance()
npt.assert_almost_equal(dist, 1.6)
self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
def test_set_max_distance(self):
"""set_max_distance sets MaxDistTips across tree"""
tree = TreeNode.read(io.StringIO(
"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
tree._set_max_distance()
tip_a, tip_b = tree.MaxDistTips
self.assertEqual(tip_a[0] + tip_b[0], 1.6)
self.assertEqual(sorted([tip_a[1].name, tip_b[1].name]), ['b', 'e'])
def test_set_max_distance_tie_bug(self):
"""Corresponds to #1077"""
s = io.StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
t = TreeNode.read(s)
exp = ((3.0, t.find('a')), (9.0, t.find('e')))
# the above tree would trigger an exception in max. The central issue
# was that the data being passed to max were a tuple of tuples:
# ((left_d, left_n), (right_d, right_n))
# the call to max would break in this scenario as it would fall back on
# idx 1 of each tuple to assess the "max".
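# For illustration (node_a/node_b are hypothetical): max([(3.0, node_a),
# (3.0, node_b)]) ties on index 0, so Python falls back to comparing the
# TreeNode objects at index 1, which is not a meaningful ordering (and can
# raise TypeError on Python 3).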
t._set_max_distance()
self.assertEqual(t.MaxDistTips, exp)
def test_set_max_distance_inplace_modification_bug(self):
"""Corresponds to #1223"""
s = io.StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
t = TreeNode.read(s)
exp = [((0.0, t.find('a')), (0.0, t.find('a'))),
((0.0, t.find('b')), (0.0, t.find('b'))),
((1.0, t.find('a')), (1.0, t.find('b'))),
((0.0, t.find('d')), (0.0, t.find('d'))),
((0.0, t.find('e')), (0.0, t.find('e'))),
((3.0, t.find('d')), (4.0, t.find('e'))),
((3.0, t.find('a')), (9.0, t.find('e')))]
t._set_max_distance()
self.assertEqual([n.MaxDistTips for n in t.postorder()], exp)
def test_shear(self):
"""Shear the nodes"""
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
obs = str(t.shear(['G', 'M']))
exp = '(G:3.0,M:3.7);\n'
self.assertEqual(obs, exp)
def test_compare_tip_distances(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((H:1,G:1,O:1):2,R:3):1,X:4);'))
obs = t.compare_tip_distances(t2)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = pearsonr(m1.flat, m2.flat)[0]
self.assertAlmostEqual(obs, (1 - r) / 2)
def test_compare_tip_distances_sample(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((H:1,G:1,O:1):2,R:3):1,X:4);'))
obs = t.compare_tip_distances(t2, sample=3, shuffle_f=sorted)
# note: common taxa are H, G, R (only)
m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
m2 = np.array([[0, 2, 6], [2, 0, 6], [6, 6, 0]])
r = pearsonr(m1.flat, m2.flat)[0]
self.assertAlmostEqual(obs, (1 - r) / 2)
# 4 common taxa, still picking H, G, R
s = '((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
t = TreeNode.read(io.StringIO(s))
s3 = '(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
t3 = TreeNode.read(io.StringIO(s3))
obs = t.compare_tip_distances(t3, sample=3, shuffle_f=sorted)
def test_compare_tip_distances_no_common_tips(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((Z:1,Y:1,X:1):2,W:3):1,V:4);'))
with self.assertRaises(ValueError):
t.compare_tip_distances(t2)
def test_compare_tip_distances_single_common_tip(self):
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
t2 = TreeNode.read(io.StringIO('(((R:1,Y:1,X:1):2,W:3):1,V:4);'))
self.assertEqual(t.compare_tip_distances(t2), 1)
self.assertEqual(t2.compare_tip_distances(t), 1)
def test_tip_tip_distances_endpoints(self):
"""Test getting specifc tip distances with tipToTipDistances"""
t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
nodes = [t.find('H'), t.find('G'), t.find('M')]
names = ['H', 'G', 'M']
exp = DistanceMatrix(np.array([[0, 2.0, 6.7],
[2.0, 0, 6.7],
[6.7, 6.7, 0.0]]), ['H', 'G', 'M'])
obs = t.tip_tip_distances(endpoints=names)
self.assertEqual(obs, exp)
obs = t.tip_tip_distances(endpoints=nodes)
self.assertEqual(obs, exp)
def test_tip_tip_distances_non_tip_endpoints(self):
t = TreeNode.read(io.StringIO('((H:1,G:1)foo:2,(R:0.5,M:0.7):3);'))
with self.assertRaises(ValueError):
t.tip_tip_distances(endpoints=['foo'])
def test_tip_tip_distances_no_length(self):
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
exp_t = TreeNode.read(io.StringIO("((a:0,b:0)c:0,(d:0,e:0)f:0);"))
exp_t_dm = exp_t.tip_tip_distances()
t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
self.assertEqual(t_dm, exp_t_dm)
for node in t.preorder():
self.assertIs(node.length, None)
def test_tip_tip_distances_missing_length(self):
t = TreeNode.read(io.StringIO("((a,b:6)c:4,(d,e:0)f);"))
exp_t = TreeNode.read(io.StringIO("((a:0,b:6)c:4,(d:0,e:0)f:0);"))
exp_t_dm = exp_t.tip_tip_distances()
t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
self.assertEqual(t_dm, exp_t_dm)
def test_neighbors(self):
"""Get neighbors of a node"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
exp = t.children
obs = t.neighbors()
self.assertEqual(obs, exp)
exp = t.children[0].children + [t]
obs = t.children[0].neighbors()
self.assertEqual(obs, exp)
exp = [t.children[0].children[0]] + [t]
obs = t.children[0].neighbors(ignore=t.children[0].children[1])
self.assertEqual(obs, exp)
exp = [t.children[0]]
obs = t.children[0].children[0].neighbors()
self.assertEqual(obs, exp)
def test_has_children(self):
"""Test if has children"""
t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
self.assertTrue(t.has_children())
self.assertTrue(t.children[0].has_children())
self.assertTrue(t.children[1].has_children())
self.assertFalse(t.children[0].children[0].has_children())
self.assertFalse(t.children[0].children[1].has_children())
self.assertFalse(t.children[1].children[0].has_children())
self.assertFalse(t.children[1].children[1].has_children())
def test_tips(self):
"""Tip traversal of tree"""
exp = ['a', 'b', 'c', 'd']
obs = [n.name for n in self.simple_t.tips()]
self.assertEqual(obs, exp)
obs2 = [n.name for n in self.simple_t.traverse(False, False)]
self.assertEqual(obs2, exp)
def test_pre_and_postorder(self):
"""Pre and post order traversal of the tree"""
exp = ['root', 'i1', 'a', 'b', 'i1', 'i2', 'c', 'd', 'i2', 'root']
obs = [n.name for n in self.simple_t.pre_and_postorder()]
self.assertEqual(obs, exp)
obs2 = [n.name for n in self.simple_t.traverse(True, True)]
self.assertEqual(obs2, exp)
def test_pre_and_postorder_no_children(self):
t = TreeNode('brofist')
# include self
exp = ['brofist']
obs = [n.name for n in t.pre_and_postorder()]
self.assertEqual(obs, exp)
# do not include self
obs = list(t.pre_and_postorder(include_self=False))
self.assertEqual(obs, [])
def test_levelorder(self):
"""Test level order traversal of the tree"""
exp = ['root', 'i1', 'i2', 'a', 'b', 'c', 'd']
obs = [n.name for n in self.simple_t.levelorder()]
self.assertEqual(obs, exp)
def test_bifurcate(self):
t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
t2 = TreeNode.read(io.StringIO('((a,b,c));'))
t3 = t2.copy()
t1.bifurcate()
t2.bifurcate()
t3.bifurcate(insert_length=0)
self.assertEqual(str(t1), '(((a,b),c),(d,e));\n')
self.assertEqual(str(t2), '((c,(a,b)));\n')
self.assertEqual(str(t3), '((c,(a,b):0));\n')
def test_index_tree_single_node(self):
"""index_tree handles single node tree"""
t1 = TreeNode.read(io.StringIO('root;'))
id_index, child_index = t1.index_tree()
self.assertEqual(id_index[0], t1)
npt.assert_equal(child_index, np.array([[]]))
def test_index_tree(self):
"""index_tree should produce correct index and node map"""
# test for first tree: contains singleton outgroup
t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
t2 = TreeNode.read(io.StringIO('(((a,b),(c,d)),(e,f));'))
t3 = TreeNode.read(io.StringIO('(((a,b,c),(d)),(e,f));'))
id_1, child_1 = t1.index_tree()
nodes_1 = [n.id for n in t1.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_1, [0, 1, 2, 3, 6, 4, 5, 7, 8])
npt.assert_equal(child_1, np.array([[2, 0, 1], [6, 2, 3], [7, 4, 5],
[8, 6, 7]]))
# test for second tree: strictly bifurcating
id_2, child_2 = t2.index_tree()
nodes_2 = [n.id for n in t2.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_2, [0, 1, 4, 2, 3, 5, 8, 6, 7, 9, 10])
npt.assert_equal(child_2, np.array([[4, 0, 1], [5, 2, 3],
[8, 4, 5], [9, 6, 7],
[10, 8, 9]]))
# test for third tree: contains trifurcation and single-child parent
id_3, child_3 = t3.index_tree()
nodes_3 = [n.id for n in t3.traverse(self_before=False,
self_after=True)]
self.assertEqual(nodes_3, [0, 1, 2, 4, 3, 5, 8, 6, 7, 9, 10])
npt.assert_equal(child_3, np.array([[4, 0, 2], [5, 3, 3], [8, 4, 5],
[9, 6, 7], [10, 8, 9]]))
def test_root_at(self):
"""Form a new root"""
t = TreeNode.read(io.StringIO("(((a,b)c,(d,e)f)g,h)i;"))
with self.assertRaises(TreeError):
t.root_at(t.find('h'))
exp = "(a,b,((d,e)f,(h)g)c)root;\n"
rooted = t.root_at('c')
obs = str(rooted)
self.assertEqual(obs, exp)
def test_root_at_midpoint(self):
"""Root at the midpoint"""
tree1 = self.TreeRoot
for n in tree1.traverse():
n.length = 1
result = tree1.root_at_midpoint()
self.assertEqual(result.distance(result.find('e')), 1.5)
self.assertEqual(result.distance(result.find('g')), 2.5)
exp_dist = tree1.tip_tip_distances()
obs_dist = result.tip_tip_distances()
self.assertEqual(obs_dist, exp_dist)
def test_root_at_midpoint_no_lengths(self):
# should get same tree back (a copy)
nwk = '(a,b)c;\n'
t = TreeNode.read(io.StringIO(nwk))
obs = t.root_at_midpoint()
self.assertEqual(str(obs), nwk)
def test_root_at_midpoint_tie(self):
nwk = "(((a:1,b:1)c:2,(d:3,e:4)f:5),g:1)root;"
t = TreeNode.read(io.StringIO(nwk))
exp = "((d:3,e:4)f:2,((a:1,b:1)c:2,(g:1)):3)root;"
texp = TreeNode.read(io.StringIO(exp))
obs = t.root_at_midpoint()
for o, e in zip(obs.traverse(), texp.traverse()):
self.assertEqual(o.name, e.name)
self.assertEqual(o.length, e.length)
def test_compare_subsets(self):
"""compare_subsets should return the fraction of shared subsets"""
t = TreeNode.read(io.StringIO('((H,G),(R,M));'))
t2 = TreeNode.read(io.StringIO('(((H,G),R),M);'))
t4 = TreeNode.read(io.StringIO('(((H,G),(O,R)),X);'))
result = t.compare_subsets(t)
self.assertEqual(result, 0)
result = t2.compare_subsets(t2)
self.assertEqual(result, 0)
result = t.compare_subsets(t2)
self.assertEqual(result, 0.5)
result = t.compare_subsets(t4)
self.assertEqual(result, 1 - 2. / 5)
result = t.compare_subsets(t4, exclude_absent_taxa=True)
self.assertEqual(result, 1 - 2. / 3)
result = t.compare_subsets(self.TreeRoot, exclude_absent_taxa=True)
self.assertEqual(result, 1)
result = t.compare_subsets(self.TreeRoot)
self.assertEqual(result, 1)
def test_compare_rfd(self):
"""compare_rfd should return the Robinson Foulds distance"""
t = TreeNode.read(io.StringIO('((H,G),(R,M));'))
t2 = TreeNode.read(io.StringIO('(((H,G),R),M);'))
t4 = TreeNode.read(io.StringIO('(((H,G),(O,R)),X);'))
obs = t.compare_rfd(t2)
exp = 2.0
self.assertEqual(obs, exp)
self.assertEqual(t.compare_rfd(t2), t2.compare_rfd(t))
obs = t.compare_rfd(t2, proportion=True)
exp = 0.5
self.assertEqual(obs, exp)
with self.assertRaises(ValueError):
t.compare_rfd(t4)
def test_assign_ids(self):
"""Assign IDs to the tree"""
t1 = TreeNode.read(io.StringIO("(((a,b),c),(e,f),(g));"))
t2 = TreeNode.read(io.StringIO("(((a,b),c),(e,f),(g));"))
t3 = TreeNode.read(io.StringIO("((g),(e,f),(c,(a,b)));"))
t1_copy = t1.copy()
t1.assign_ids()
t2.assign_ids()
t3.assign_ids()
t1_copy.assign_ids()
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t2.traverse()])
self.assertEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t1_copy.traverse()])
self.assertNotEqual([(n.name, n.id) for n in t1.traverse()],
[(n.name, n.id) for n in t3.traverse()])
def test_assign_ids_index_tree(self):
"""assign_ids and index_tree should assign the same IDs"""
t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
t2 = TreeNode.read(io.StringIO('(((a,b),(c,d)),(e,f));'))
t3 = TreeNode.read(io.StringIO('(((a,b,c),(d)),(e,f));'))
t1_copy = t1.copy()
t2_copy = t2.copy()
t3_copy = t3.copy()
t1.assign_ids()
t1_copy.index_tree()
t2.assign_ids()
t2_copy.index_tree()
t3.assign_ids()
t3_copy.index_tree()
self.assertEqual([n.id for n in t1.traverse()],
[n.id for n in t1_copy.traverse()])
self.assertEqual([n.id for n in t2.traverse()],
[n.id for n in t2_copy.traverse()])
self.assertEqual([n.id for n in t3.traverse()],
[n.id for n in t3_copy.traverse()])
def test_unrooted_deepcopy(self):
"""Do an unrooted_copy"""
t = TreeNode.read(io.StringIO("((a,(b,c)d)e,(f,g)h)i;"))
exp = "(b,c,(a,((f,g)h)e)d)root;\n"
obs = t.find('d').unrooted_deepcopy()
self.assertEqual(str(obs), exp)
t_ids = {id(n) for n in t.traverse()}
obs_ids = {id(n) for n in obs.traverse()}
self.assertEqual(t_ids.intersection(obs_ids), set())
def test_descending_branch_length(self):
"""Calculate descending branch_length"""
tr = TreeNode.read(io.StringIO(
"(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
sdbl = tr.descending_branch_length(['A', 'E'])
npt.assert_almost_equal(tdbl, 8.9)
npt.assert_almost_equal(sdbl, 2.2)
self.assertRaises(ValueError, tr.descending_branch_length,
['A', 'DNE'])
self.assertRaises(ValueError, tr.descending_branch_length, ['A', 'C'])
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
npt.assert_almost_equal(tdbl, 8.8)
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length()
npt.assert_almost_equal(tdbl, 7.9)
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length(['A', 'D', 'E'])
npt.assert_almost_equal(tdbl, 2.1)
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
tdbl = tr.descending_branch_length(['I', 'D', 'E'])
npt.assert_almost_equal(tdbl, 6.6)
# test with a situation where we have unnamed internal nodes
tr = TreeNode.read(io.StringIO(
"(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I:.5)J:1.3);"))
tdbl = tr.descending_branch_length()
npt.assert_almost_equal(tdbl, 7.9)
def test_to_array(self):
"""Convert a tree to arrays"""
t = TreeNode.read(io.StringIO(
'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
id_index, child_index = t.index_tree()
arrayed = t.to_array()
self.assertEqual(id_index, arrayed['id_index'])
npt.assert_equal(child_index, arrayed['child_index'])
exp = np.array([1, 2, 3, 5, 4, 6, 8, 9, 7, 10, np.nan])
obs = arrayed['length']
npt.assert_equal(obs, exp)
exp = np.array(['a', 'b', 'c', 'd', 'x',
'y', 'e', 'f', 'z', 'z', None])
obs = arrayed['name']
npt.assert_equal(obs, exp)
exp = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
obs = arrayed['id']
npt.assert_equal(obs, exp)
def test_to_array_attrs(self):
t = TreeNode.read(io.StringIO(
'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
id_index, child_index = t.index_tree()
arrayed = t.to_array(attrs=[('name', object)])
# should only have id_index, child_index, and name since we specified
# attrs
self.assertEqual(len(arrayed), 3)
self.assertEqual(id_index, arrayed['id_index'])
npt.assert_equal(child_index, arrayed['child_index'])
exp = np.array(['a', 'b', 'c', 'd', 'x',
'y', 'e', 'f', 'z', 'z', None])
obs = arrayed['name']
npt.assert_equal(obs, exp)
# invalid attrs
with self.assertRaises(AttributeError):
t.to_array(attrs=[('name', object), ('brofist', int)])
def test_to_array_nan_length_value(self):
t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root;"))
indexed = t.to_array(nan_length_value=None)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, np.nan], dtype=float))
indexed = t.to_array(nan_length_value=0.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, 0.0], dtype=float))
indexed = t.to_array(nan_length_value=42.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, 42.0], dtype=float))
t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root:4;"))
indexed = t.to_array(nan_length_value=42.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 3, 4], dtype=float))
t = TreeNode.read(io.StringIO("((a:1, b:2)c)root;"))
indexed = t.to_array(nan_length_value=42.0)
npt.assert_equal(indexed['length'],
np.array([1, 2, 42.0, 42.0], dtype=float))
def test_from_taxonomy(self):
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
exp = TreeNode.read(io.StringIO(
"((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
"(((((((3,5)n,(4)q)m)l)k)j)i)h);"))
root = TreeNode.from_taxonomy(input_lineages.items())
self.assertEqual(root.compare_subsets(exp), 0.0)
def test_to_taxonomy(self):
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
tree = TreeNode.from_taxonomy(input_lineages.items())
exp = sorted(input_lineages.items())
obs = [(n.name, lin) for n, lin in tree.to_taxonomy(allow_empty=True)]
self.assertEqual(sorted(obs), exp)
def test_to_taxonomy_filter(self):
input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
'2': ['a', 'b', 'c', None, None, 'x', 'y'],
'3': ['h', 'i', 'j', 'k', 'l'], # test jagged
'4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
'5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
tree = TreeNode.from_taxonomy(input_lineages.items())
def f(node, lin):
return 'k' in lin or 'x' in lin
exp = [('2', ['a', 'b', 'c', 'x', 'y']),
('3', ['h', 'i', 'j', 'k', 'l']),
('4', ['h', 'i', 'j', 'k', 'l', 'm', 'q']),
('5', ['h', 'i', 'j', 'k', 'l', 'm', 'n'])]
obs = [(n.name, lin) for n, lin in tree.to_taxonomy(filter_f=f)]
self.assertEqual(sorted(obs), exp)
def test_linkage_matrix(self):
# Ensure matches: http://www.southampton.ac.uk/~re1u06/teaching/upgma/
id_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
linkage = np.asarray([[1.0, 5.0, 1.0, 2.0],
[0.0, 3.0, 8.0, 2.0],
[6.0, 7.0, 12.5, 3.0],
[8.0, 9.0, 16.5, 5.0],
[2.0, 10.0, 29.0, 6.0],
[4.0, 11.0, 34.0, 7.0]])
tree = TreeNode.from_linkage_matrix(linkage, id_list)
self.assertEqual("(E:17.0,(C:14.5,((A:4.0,D:4.0):4.25,(G:6.25,(B:0.5,"
"F:0.5):5.75):2.0):6.25):2.5);\n",
str(tree))
def test_shuffle_invalid_iter(self):
shuffler = self.simple_t.shuffle(n=-1)
with self.assertRaises(ValueError):
next(shuffler)
def test_shuffle_n_2(self):
exp = ["((a,b)i1,(d,c)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((a,b)i1,(d,c)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((a,b)i1,(d,c)i2)root;\n"]
obs_g = self.simple_t.shuffle(k=2, shuffle_f=self.rev_f, n=np.inf)
obs = [str(next(obs_g)) for i in range(5)]
self.assertEqual(obs, exp)
def test_shuffle_n_none(self):
exp = ["((d,c)i1,(b,a)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((d,c)i1,(b,a)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n"]
obs_g = self.simple_t.shuffle(shuffle_f=self.rev_f, n=4)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_complex(self):
exp = ["(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(f,e)int3)int4),(d,c)int5);\n",
"(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);\n"]
obs_g = self.complex_tree.shuffle(shuffle_f=self.rev_f,
names=['c', 'd', 'e', 'f'], n=4)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_names(self):
exp = ["((c,a)i1,(b,d)i2)root;\n",
"((b,c)i1,(a,d)i2)root;\n",
"((a,b)i1,(c,d)i2)root;\n",
"((c,a)i1,(b,d)i2)root;\n"]
obs_g = self.simple_t.shuffle(names=['a', 'b', 'c'],
shuffle_f=self.rotate_f, n=np.inf)
obs = [str(next(obs_g)) for i in range(4)]
self.assertEqual(obs, exp)
def test_shuffle_raises(self):
with self.assertRaises(ValueError):
next(self.simple_t.shuffle(k=1))
with self.assertRaises(ValueError):
next(self.simple_t.shuffle(k=5, names=['a', 'b']))
with self.assertRaises(MissingNodeError):
next(self.simple_t.shuffle(names=['x', 'y']))
sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
:0.17710)
:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
:0.09853);
"""
node_data_sample = """
(
(
xyz:0.28124,
(
def:0.24498,
mno:0.03627)
'A':0.17710)
B:0.04870,
abc:0.05925,
(
ghi:0.06914,
jkl:0.13776)
C:0.09853);
"""
minimal = "();"
no_names = "((,),(,));"
missing_tip_name = "((a,b),(c,));"
empty = '();'
single = '(abc:3);'
double = '(abc:3, def:4);'
onenest = '(abc:3, (def:4, ghi:5):6 );'
nodedata = '(abc:3, (def:4, ghi:5)jkl:6 );'
exp_ascii_art_three_children = """\
/-a
|
---------| /-b
| |
\--------|--c
|
\-d\
"""
if __name__ == '__main__':
main()
|
|
import os
import sys
import platform
def is_active():
return True
def get_name():
return "X11"
def can_build():
if (os.name!="posix"):
return False
if sys.platform == "darwin":
return False # no x11 on mac for now
errorval=os.system("pkg-config --version > /dev/null")
if (errorval):
print("pkg-config not found.. x11 disabled.")
return False
x11_error=os.system("pkg-config x11 --modversion > /dev/null ")
if (x11_error):
print("X11 not found.. x11 disabled.")
return False
ssl_error=os.system("pkg-config openssl --modversion > /dev/null ")
if (ssl_error):
print("OpenSSL not found.. x11 disabled.")
return False
x11_error=os.system("pkg-config xcursor --modversion > /dev/null ")
if (x11_error):
print("xcursor not found.. x11 disabled.")
return False
x11_error=os.system("pkg-config xinerama --modversion > /dev/null ")
if (x11_error):
print("xinerama not found.. x11 disabled.")
return False
return True # X11 enabled
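# The probes above share one pattern: pkg-config exits with status 0 when the
# package exists, so any nonzero os.system() return disables X11. A
# hypothetical helper (not part of this script) could express the same check:
#
#   def _has_pkg(name):
#       return os.system("pkg-config %s --modversion > /dev/null" % name) == 0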
def get_opts():
return [
('use_llvm','Use llvm compiler','no'),
('use_sanitizer','Use llvm compiler sanitize address','no'),
('use_leak_sanitizer','Use llvm compiler sanitize memory leaks','no'),
('pulseaudio','Detect & Use pulseaudio','yes'),
('gamepad','Gamepad support, requires libudev and libevdev','yes'),
('new_wm_api', 'Use experimental window management API','no'),
('debug_release', 'Add debug symbols to release version','no'),
]
def get_flags():
return [
('builtin_zlib', 'no'),
("openssl", "yes"),
#("theora","no"),
]
def configure(env):
is64=sys.maxsize > 2**32
if (env["bits"]=="default"):
if (is64):
env["bits"]="64"
else:
env["bits"]="32"
env.Append(CPPPATH=['#platform/x11'])
if (env["use_llvm"]=="yes"):
if 'clang++' not in env['CXX']:
env["CC"]="clang"
env["CXX"]="clang++"
env["LD"]="clang++"
env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
env.extra_suffix=".llvm"
if (env["colored"]=="yes"):
if sys.stdout.isatty():
env.Append(CXXFLAGS=["-fcolor-diagnostics"])
if (env["use_sanitizer"]=="yes"):
env.Append(CXXFLAGS=['-fsanitize=address','-fno-omit-frame-pointer'])
env.Append(LINKFLAGS=['-fsanitize=address'])
env.extra_suffix+="s"
if (env["use_leak_sanitizer"]=="yes"):
env.Append(CXXFLAGS=['-fsanitize=address','-fno-omit-frame-pointer'])
env.Append(LINKFLAGS=['-fsanitize=address'])
env.extra_suffix+="s"
#if (env["tools"]=="no"):
# #no tools suffix
# env['OBJSUFFIX'] = ".nt"+env['OBJSUFFIX']
# env['LIBSUFFIX'] = ".nt"+env['LIBSUFFIX']
if (env["target"]=="release"):
if (env["debug_release"]=="yes"):
env.Append(CCFLAGS=['-g2'])
else:
env.Append(CCFLAGS=['-O3','-ffast-math'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-ffast-math','-DDEBUG_ENABLED'])
if (env["debug_release"]=="yes"):
env.Append(CCFLAGS=['-g2'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g2', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
env.ParseConfig('pkg-config x11 --cflags --libs')
env.ParseConfig('pkg-config xinerama --cflags --libs')
env.ParseConfig('pkg-config xcursor --cflags --libs')
if (env["openssl"]=="yes"):
env.ParseConfig('pkg-config openssl --cflags --libs')
if (env["freetype"]=="yes"):
env.ParseConfig('pkg-config freetype2 --cflags --libs')
if (env["freetype"]!="no"):
env.Append(CCFLAGS=['-DFREETYPE_ENABLED'])
if (env["freetype"]=="builtin"):
env.Append(CPPPATH=['#tools/freetype'])
env.Append(CPPPATH=['#tools/freetype/freetype/include'])
env.Append(CPPFLAGS=['-DOPENGL_ENABLED','-DGLEW_ENABLED'])
if os.system("pkg-config --exists alsa")==0:
print("Enabling ALSA")
env.Append(CPPFLAGS=["-DALSA_ENABLED"])
env.Append(LIBS=['asound'])
else:
print("ALSA libraries not found, disabling driver")
if (env["gamepad"]=="yes" and platform.system() == "Linux"):
# pkg-config returns 0 when the lib exists...
found_udev = not os.system("pkg-config --exists libudev")
if (found_udev):
print("Enabling gamepad support with udev")
env.Append(CPPFLAGS=["-DJOYDEV_ENABLED"])
env.ParseConfig('pkg-config libudev --cflags --libs')
else:
print("libudev development libraries not found")
print("Some libraries are missing for the required gamepad support, aborting!")
print("Install the mentioned libraries or build with 'gamepad=no' to disable gamepad support.")
sys.exit(255)
if (env["pulseaudio"]=="yes"):
if not os.system("pkg-config --exists libpulse-simple"):
print("Enabling PulseAudio")
env.Append(CPPFLAGS=["-DPULSEAUDIO_ENABLED"])
env.ParseConfig('pkg-config --cflags --libs libpulse-simple')
else:
print("PulseAudio development libraries not found, disabling driver")
env.Append(CPPFLAGS=['-DX11_ENABLED','-DUNIX_ENABLED','-DGLES2_ENABLED','-DGLES_OVER_GL'])
env.Append(LIBS=['GL', 'GLU', 'pthread', 'z'])
#env.Append(CPPFLAGS=['-DMPC_FIXED_POINT'])
#host compiler is default..
if (is64 and env["bits"]=="32"):
env.Append(CPPFLAGS=['-m32'])
env.Append(LINKFLAGS=['-m32','-L/usr/lib/i386-linux-gnu'])
elif (not is64 and env["bits"]=="64"):
env.Append(CPPFLAGS=['-m64'])
env.Append(LINKFLAGS=['-m64','-L/usr/lib/i686-linux-gnu'])
import methods
env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
#env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
if(env["new_wm_api"]=="yes"):
env.Append(CPPFLAGS=['-DNEW_WM_API'])
env.ParseConfig('pkg-config xinerama --cflags --libs')
env["x86_opt_gcc"]=True
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module contains the implementation of RNN cell wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import numbers
import sys
import types as python_types
import warnings
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
class DropoutWrapperBase(object):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self,
cell,
input_keep_prob=1.0,
output_keep_prob=1.0,
state_keep_prob=1.0,
variational_recurrent=False,
input_size=None,
dtype=None,
seed=None,
dropout_state_filter_visitor=None,
**kwargs):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
then the same dropout mask is applied at every step, as described in:
[A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks. Y. Gal, Z. Ghahramani](https://arxiv.org/abs/1512.05287).
Otherwise a different dropout mask is applied at every time step.
Note, by default (unless a custom `dropout_state_filter` is provided),
the memory state (`c` component of any `LSTMStateTuple`) passing through
a `DropoutWrapper` is never modified. This behavior is described in the
above article.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
state_keep_prob: unit Tensor or float between 0 and 1, state keep
probability; if it is constant and 1, no state dropout will be added.
State dropout is performed on the outgoing states of the cell. **Note**
the state components to which dropout is applied when `state_keep_prob`
is in `(0, 1)` are also determined by the argument
`dropout_state_filter_visitor` (e.g. by default dropout is never applied
to the `c` component of an `LSTMStateTuple`).
variational_recurrent: Python bool. If `True`, then the same dropout
pattern is applied across all time steps per run call. If this parameter
is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff** `variational_recurrent
= True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
dropout_state_filter_visitor: (optional), default: (see below). Function
that takes any hierarchical level of the state and returns a scalar or
depth=1 structure of Python booleans describing which terms in the state
should be dropped out. In addition, if the function returns `True`,
dropout is applied across this sublevel. If the function returns
`False`, dropout is not applied across this entire sublevel.
Default behavior: perform dropout on all terms except the memory (`c`)
state of `LSTMCellState` objects, and don't try to apply dropout to
`TensorArray` objects:
```
def dropout_state_filter_visitor(s):
  if isinstance(s, LSTMCellState):
    # Never perform dropout on the c state.
    return LSTMCellState(c=False, h=True)
  elif isinstance(s, TensorArray):
    return False
  return True
```
**kwargs: dict of keyword arguments for base layer.
Raises:
TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
but not `callable`.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
super(DropoutWrapperBase, self).__init__(cell, dtype=dtype, **kwargs)
if (dropout_state_filter_visitor is not None and
not callable(dropout_state_filter_visitor)):
raise TypeError("dropout_state_filter_visitor must be callable")
self._dropout_state_filter = (
dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
with ops.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = ops.convert_to_tensor(v)
const_value = tensor_util.constant_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
raise ValueError("Parameter %s must be between 0 and 1: %d" %
(attr, const_prob))
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set variational_recurrent, seed before running the code below
self._variational_recurrent = variational_recurrent
self._input_size = input_size
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return array_ops.concat(([1], tensor_shape.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure_up_to(
input_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure_up_to(
cell.state_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure_up_to(
cell.output_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
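# Illustrative example: with self._seed = 1, salt_prefix "input" and index 0,
# the derived seed is int(hashlib.md5(b"1input_0").hexdigest()[:8], 16)
# & 0x7FFFFFFF, i.e. a deterministic 31-bit value per (salt_prefix, index).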
@property
def wrapped_cell(self):
return self.cell
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def build(self, inputs_shape):
self.cell.build(inputs_shape)
self.built = True
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self.cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
self, unused_index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.divide(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
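# Worked example (illustrative): with keep_prob = 0.8 and noise drawn
# uniformly from [0, 1), random_tensor lies in [0.8, 1.8). Flooring it gives
# 1.0 with probability 0.8 and 0.0 otherwise, so surviving elements are
# scaled by 1 / 0.8 = 1.25 and dropped elements become exactly 0.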
def _dropout(self,
values,
salt_prefix,
recurrent_noise,
keep_prob,
shallow_filtered_substructure=None):
"""Decides whether to perform standard dropout or recurrent dropout."""
if shallow_filtered_substructure is None:
# Put something so we traverse the entire structure; inside the
# dropout function we check to see if leafs of this are bool or not.
shallow_filtered_substructure = values
if not self._variational_recurrent:
def dropout(i, do_dropout, v):
if not isinstance(do_dropout, bool) or do_dropout:
return nn_ops.dropout_v2(
v, rate=1. - keep_prob, seed=self._gen_seed(salt_prefix, i))
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values])
else:
def dropout(i, do_dropout, v, n):
if not isinstance(do_dropout, bool) or do_dropout:
return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values, recurrent_noise])
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Runs the wrapped cell and applies dropout.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
def _should_dropout(p):
return (not isinstance(p, float)) or p < 1
if _should_dropout(self._input_keep_prob):
inputs = self._dropout(inputs, "input", self._recurrent_input_noise,
self._input_keep_prob)
output, new_state = cell_call_fn(inputs, state, **kwargs)
if _should_dropout(self._state_keep_prob):
# Identify which subsets of the state to perform dropout on and
# which ones to keep.
shallow_filtered_substructure = nest.get_traverse_shallow_structure(
self._dropout_state_filter, new_state)
new_state = self._dropout(new_state, "state", self._recurrent_state_noise,
self._state_keep_prob,
shallow_filtered_substructure)
if _should_dropout(self._output_keep_prob):
output = self._dropout(output, "output", self._recurrent_output_noise,
self._output_keep_prob)
return output, new_state
def get_config(self):
"""Returns the config of the dropout wrapper."""
config = {
"input_keep_prob": self._input_keep_prob,
"output_keep_prob": self._output_keep_prob,
"state_keep_prob": self._state_keep_prob,
"variational_recurrent": self._variational_recurrent,
"input_size": self._input_size,
"seed": self._seed,
}
if self._dropout_state_filter != _default_dropout_state_filter_visitor:
function, function_type, function_module = _serialize_function_to_config(
self._dropout_state_filter)
config.update({"dropout_fn": function,
"dropout_fn_type": function_type,
"dropout_fn_module": function_module})
base_config = super(DropoutWrapperBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
if "dropout_fn" in config:
config = config.copy()
dropout_state_filter = _parse_config_to_function(
config, custom_objects, "dropout_fn", "dropout_fn_type",
"dropout_fn_module")
config.pop("dropout_fn")
config["dropout_state_filter_visitor"] = dropout_state_filter
return super(DropoutWrapperBase, cls).from_config(
config, custom_objects=custom_objects)
class ResidualWrapperBase(object):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell, residual_fn=None, **kwargs):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
residual_fn: (Optional) The function to map raw cell inputs and raw cell
outputs to the actual cell outputs of the residual network.
Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
and outputs.
**kwargs: dict of keyword arguments for base layer.
"""
super(ResidualWrapperBase, self).__init__(cell, **kwargs)
self._residual_fn = residual_fn
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self.cell.zero_state(batch_size, dtype)
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Run the cell and then apply the residual_fn on its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments passed to the wrapped cell's `call`.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = cell_call_fn(inputs, state, **kwargs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
def default_residual_fn(inputs, outputs):
nest.assert_same_structure(inputs, outputs)
nest.map_structure(assert_shape_match, inputs, outputs)
return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)
res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
return (res_outputs, new_state)
def get_config(self):
"""Returns the config of the residual wrapper."""
if self._residual_fn is not None:
function, function_type, function_module = _serialize_function_to_config(
self._residual_fn)
config = {
"residual_fn": function,
"residual_fn_type": function_type,
"residual_fn_module": function_module
}
else:
config = {}
base_config = super(ResidualWrapperBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
if "residual_fn" in config:
config = config.copy()
residual_function = _parse_config_to_function(config, custom_objects,
"residual_fn",
"residual_fn_type",
"residual_fn_module")
config["residual_fn"] = residual_function
return super(ResidualWrapperBase, cls).from_config(
config, custom_objects=custom_objects)
class DeviceWrapperBase(object):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device, **kwargs):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
**kwargs: dict of keyword arguments for base layer.
"""
super(DeviceWrapperBase, self).__init__(cell, **kwargs)
self._device = device
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
with ops.device(self._device):
return self.cell.zero_state(batch_size, dtype)
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Run the cell on specified device."""
with ops.device(self._device):
return cell_call_fn(inputs, state, **kwargs)
def get_config(self):
config = {"device": self._device}
base_config = super(DeviceWrapperBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(function):
"""Serialize the function for get_config()."""
if isinstance(function, python_types.LambdaType):
output = generic_utils.func_dump(function)
output_type = "lambda"
module = function.__module__
elif callable(function):
output = function.__name__
output_type = "function"
module = function.__module__
else:
raise ValueError("Unrecognized function type for input: {}".format(
type(function)))
return output, output_type, module
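# Illustrative example: a plain named function such as `def my_residual(i, o)`
# (hypothetical) serializes to ("my_residual", "function", its module name),
# while a lambda is dumped via generic_utils.func_dump and tagged "lambda" so
# that _parse_config_to_function below can rebuild it from bytecode.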
def _parse_config_to_function(config, custom_objects, func_attr_name,
func_type_attr_name, module_attr_name):
"""Reconstruct the function from the config."""
globs = globals()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn("{} is not loaded, but a layer uses it. "
"It may cause errors.".format(module), UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == "function":
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name="function in wrapper")
elif function_type == "lambda":
# Unsafe deserialization from bytecode
function = generic_utils.func_load(
config[func_attr_name], globs=globs)
else:
raise TypeError("Unknown function type:", function_type)
return function
def _default_dropout_state_filter_visitor(substate):
from tensorflow.python.keras.layers.legacy_rnn.rnn_cell_impl import LSTMStateTuple # pylint: disable=g-import-not-at-top
if isinstance(substate, LSTMStateTuple):
# Do not perform dropout on the memory state.
return LSTMStateTuple(c=False, h=True)
elif isinstance(substate, tensor_array_ops.TensorArray):
return False
return True
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure_up_to(shallow_structure, enumerated_fn, *args,
**kwargs)
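# Minimal sketch of the enumeration (assuming flat list structures):
#
#   _enumerated_map_structure_up_to([None, None],
#                                   lambda i, x: (i, x * 2),
#                                   [1, 2])
#   # -> [(0, 2), (1, 4)]
#
# The closure over `ix` hands each leaf a running index, which the noise
# builders and _gen_seed use to derive distinct per-leaf seeds.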
|
|
# Copyright (C) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for understanding the various ports
import os
import platform
import sys
from webkitpy.common.system.executive import Executive
class WebKitPort(object):
# We might need to pass scm into this function for scm.checkout_root
@classmethod
def script_path(cls, script_name):
return os.path.join("Tools", "Scripts", script_name)
@classmethod
def script_shell_command(cls, script_name):
script_path = cls.script_path(script_name)
# Win32 does not support shebang. We need to detect the interpreter ourselves.
if sys.platform == 'win32':
interpreter = Executive.interpreter_for_script(script_path)
if interpreter:
return [interpreter, script_path]
return [script_path]
@staticmethod
def port(port_name):
ports = {
"chromium": ChromiumPort,
"chromium-xvfb": ChromiumXVFBPort,
"gtk": GtkPort,
"mac": MacPort,
"win": WinPort,
"qt": QtPort,
"efl": EflPort,
}
default_port = {
"Windows": WinPort,
"Darwin": MacPort,
}
# Do we really need MacPort as the ultimate default?
return ports.get(port_name, default_port.get(platform.system(), MacPort))
@staticmethod
def makeArgs():
args = '--makeargs="-j%s"' % Executive().cpu_count()
if 'MAKEFLAGS' in os.environ:
args = '--makeargs="%s"' % os.environ['MAKEFLAGS']
return args
@classmethod
def name(cls):
raise NotImplementedError("subclasses must implement")
@classmethod
def flag(cls):
raise NotImplementedError("subclasses must implement")
@classmethod
def update_webkit_command(cls):
return cls.script_shell_command("update-webkit")
@classmethod
def check_webkit_style_command(cls):
return cls.script_shell_command("check-webkit-style")
@classmethod
def prepare_changelog_command(cls):
return cls.script_shell_command("prepare-ChangeLog")
@classmethod
def build_webkit_command(cls, build_style=None):
command = cls.script_shell_command("build-webkit")
if build_style == "debug":
command.append("--debug")
if build_style == "release":
command.append("--release")
return command
@classmethod
def run_javascriptcore_tests_command(cls):
return cls.script_shell_command("run-javascriptcore-tests")
@classmethod
def run_webkit_tests_command(cls):
return cls.script_shell_command("run-webkit-tests")
@classmethod
def run_python_unittests_command(cls):
return cls.script_shell_command("test-webkitpy")
@classmethod
def run_perl_unittests_command(cls):
return cls.script_shell_command("test-webkitperl")
@classmethod
def layout_tests_results_path(cls):
return "/tmp/layout-test-results/results.html"
class MacPort(WebKitPort):
@classmethod
def name(cls):
return "Mac"
@classmethod
def flag(cls):
return "--port=mac"
@classmethod
def _system_version(cls):
version_string = platform.mac_ver()[0] # e.g. "10.5.6"
version_tuple = version_string.split('.')
return map(int, version_tuple)
@classmethod
def is_leopard(cls):
return tuple(cls._system_version()[:2]) == (10, 5)
class WinPort(WebKitPort):
@classmethod
def name(cls):
return "Win"
@classmethod
def flag(cls):
# FIXME: This is lame. We should autogenerate this from a codename or something.
return "--port=win"
class GtkPort(WebKitPort):
@classmethod
def name(cls):
return "Gtk"
@classmethod
def flag(cls):
return "--port=gtk"
@classmethod
def build_webkit_command(cls, build_style=None):
command = WebKitPort.build_webkit_command(build_style=build_style)
command.append("--gtk")
command.append(WebKitPort.makeArgs())
return command
@classmethod
def run_webkit_tests_command(cls):
command = WebKitPort.run_webkit_tests_command()
command.append("--gtk")
return command
class QtPort(WebKitPort):
@classmethod
def name(cls):
return "Qt"
@classmethod
def flag(cls):
return "--port=qt"
@classmethod
def build_webkit_command(cls, build_style=None):
command = WebKitPort.build_webkit_command(build_style=build_style)
command.append("--qt")
command.append(WebKitPort.makeArgs())
return command
class EflPort(WebKitPort):
@classmethod
def name(cls):
return "Efl"
@classmethod
def flag(cls):
return "--port=efl"
@classmethod
def build_webkit_command(cls, build_style=None):
command = WebKitPort.build_webkit_command(build_style=build_style)
command.append("--efl")
command.append(WebKitPort.makeArgs())
return command
class ChromiumPort(WebKitPort):
@classmethod
def name(cls):
return "Chromium"
@classmethod
def flag(cls):
return "--port=chromium"
@classmethod
def update_webkit_command(cls):
command = WebKitPort.update_webkit_command()
command.append("--chromium")
return command
@classmethod
def build_webkit_command(cls, build_style=None):
command = WebKitPort.build_webkit_command(build_style=build_style)
command.append("--chromium")
command.append("--update-chromium")
return command
@classmethod
def run_webkit_tests_command(cls):
command = cls.script_shell_command("new-run-webkit-tests")
command.append("--chromium")
command.append("--no-pixel-tests")
return command
@classmethod
def run_javascriptcore_tests_command(cls):
return None
class ChromiumXVFBPort(ChromiumPort):
@classmethod
def flag(cls):
return "--port=chromium-xvfb"
@classmethod
def run_webkit_tests_command(cls):
# FIXME: We should find a better way to do this.
return ["xvfb-run"] + ChromiumPort.run_webkit_tests_command()
|
|
import re
import six
from . import messages
from .unicode_block import (
unicode_block,
UNICODE_BASIC_LATIN,
UNICODE_LATIN_1_SUPPLEMENT,
UNICODE_LATIN_EXTENDED_B,
UNICODE_GENERAL_PUNCTUATION,
UNICODE_ARABIC,
UNICODE_LATIN_EXTENDED_ADDITIONAL,
UNICODE_HIRAGANA,
UNICODE_KATAKANA,
UNICODE_BOPOMOFO,
UNICODE_BOPOMOFO_EXTENDED,
UNICODE_CJK_UNIFIED_IDEOGRAPHS,
UNICODE_HANGUL_SYLLABLES,
)
class NGram(object):
LATIN1_EXCLUDED = messages.get_string('NGram.LATIN1_EXCLUDE')
N_GRAM = 3
def __init__(self):
self.grams = ' '
self.capitalword = False
def add_char(self, ch):
'''Append a character into ngram buffer.'''
ch = self.normalize(ch)
last_char = self.grams[-1]
if last_char == ' ':
self.grams = ' '
self.capitalword = False
if ch == ' ':
return
elif len(self.grams) >= self.N_GRAM:
self.grams = self.grams[1:]
self.grams += ch
if ch.isupper():
if last_char.isupper():
self.capitalword = True
else:
self.capitalword = False
def get(self, n):
'''Get n-gram.'''
if self.capitalword:
return
if n < 1 or n > self.N_GRAM or len(self.grams) < n:
return
if n == 1:
ch = self.grams[-1]
if ch == ' ':
return
return ch
else:
return self.grams[-n:]
@classmethod
def normalize(cls, ch):
block = unicode_block(ch)
if block == UNICODE_BASIC_LATIN:
if ch < 'A' or ('Z' < ch < 'a') or 'z' < ch:
ch = ' '
elif block == UNICODE_LATIN_1_SUPPLEMENT:
if cls.LATIN1_EXCLUDED.find(ch) >= 0:
ch = ' '
elif block == UNICODE_LATIN_EXTENDED_B:
# normalization for Romanian
if ch == six.u('\u0219'): # Small S with comma below => with cedilla
ch = six.u('\u015f')
if ch == six.u('\u021b'): # Small T with comma below => with cedilla
ch = six.u('\u0163')
elif block == UNICODE_GENERAL_PUNCTUATION:
ch = ' '
elif block == UNICODE_ARABIC:
if ch == six.u('\u06cc'):
ch = six.u('\u064a') # Farsi yeh => Arabic yeh
elif block == UNICODE_LATIN_EXTENDED_ADDITIONAL:
if ch >= six.u('\u1ea0'):
ch = six.u('\u1ec3')
elif block == UNICODE_HIRAGANA:
ch = six.u('\u3042')
elif block == UNICODE_KATAKANA:
ch = six.u('\u30a2')
elif block in (UNICODE_BOPOMOFO, UNICODE_BOPOMOFO_EXTENDED):
ch = six.u('\u3105')
elif block == UNICODE_CJK_UNIFIED_IDEOGRAPHS:
ch = cls.CJK_MAP.get(ch, ch)
elif block == UNICODE_HANGUL_SYLLABLES:
ch = six.u('\uac00')
return ch
@classmethod
def normalize_vi(cls, text):
'''Normalizer for Vietnamese.
Normalize Alphabet + Diacritical Mark (U+03xx) into U+1Exx.
'''
def repl(m):
alphabet = cls.TO_NORMALIZE_VI_CHARS.find(m.group(1))
dmark = cls.DMARK_CLASS.find(m.group(2)) # Diacritical Mark
return cls.NORMALIZED_VI_CHARS[dmark][alphabet]
return cls.ALPHABET_WITH_DMARK.sub(repl, text)
NORMALIZED_VI_CHARS = [
messages.get_string('NORMALIZED_VI_CHARS_0300'),
messages.get_string('NORMALIZED_VI_CHARS_0301'),
messages.get_string('NORMALIZED_VI_CHARS_0303'),
messages.get_string('NORMALIZED_VI_CHARS_0309'),
messages.get_string('NORMALIZED_VI_CHARS_0323')]
TO_NORMALIZE_VI_CHARS = messages.get_string('TO_NORMALIZE_VI_CHARS')
DMARK_CLASS = messages.get_string('DMARK_CLASS')
ALPHABET_WITH_DMARK = re.compile(
'([' + TO_NORMALIZE_VI_CHARS + '])([' + DMARK_CLASS + '])',
re.UNICODE)
# CJK Kanji Normalization Mapping
CJK_CLASS = [
messages.get_string('NGram.KANJI_1_0'),
messages.get_string('NGram.KANJI_1_2'),
messages.get_string('NGram.KANJI_1_4'),
messages.get_string('NGram.KANJI_1_8'),
messages.get_string('NGram.KANJI_1_11'),
messages.get_string('NGram.KANJI_1_12'),
messages.get_string('NGram.KANJI_1_13'),
messages.get_string('NGram.KANJI_1_14'),
messages.get_string('NGram.KANJI_1_16'),
messages.get_string('NGram.KANJI_1_18'),
messages.get_string('NGram.KANJI_1_22'),
messages.get_string('NGram.KANJI_1_27'),
messages.get_string('NGram.KANJI_1_29'),
messages.get_string('NGram.KANJI_1_31'),
messages.get_string('NGram.KANJI_1_35'),
messages.get_string('NGram.KANJI_2_0'),
messages.get_string('NGram.KANJI_2_1'),
messages.get_string('NGram.KANJI_2_4'),
messages.get_string('NGram.KANJI_2_9'),
messages.get_string('NGram.KANJI_2_10'),
messages.get_string('NGram.KANJI_2_11'),
messages.get_string('NGram.KANJI_2_12'),
messages.get_string('NGram.KANJI_2_13'),
messages.get_string('NGram.KANJI_2_15'),
messages.get_string('NGram.KANJI_2_16'),
messages.get_string('NGram.KANJI_2_18'),
messages.get_string('NGram.KANJI_2_21'),
messages.get_string('NGram.KANJI_2_22'),
messages.get_string('NGram.KANJI_2_23'),
messages.get_string('NGram.KANJI_2_28'),
messages.get_string('NGram.KANJI_2_29'),
messages.get_string('NGram.KANJI_2_30'),
messages.get_string('NGram.KANJI_2_31'),
messages.get_string('NGram.KANJI_2_32'),
messages.get_string('NGram.KANJI_2_35'),
messages.get_string('NGram.KANJI_2_36'),
messages.get_string('NGram.KANJI_2_37'),
messages.get_string('NGram.KANJI_2_38'),
messages.get_string('NGram.KANJI_3_1'),
messages.get_string('NGram.KANJI_3_2'),
messages.get_string('NGram.KANJI_3_3'),
messages.get_string('NGram.KANJI_3_4'),
messages.get_string('NGram.KANJI_3_5'),
messages.get_string('NGram.KANJI_3_8'),
messages.get_string('NGram.KANJI_3_9'),
messages.get_string('NGram.KANJI_3_11'),
messages.get_string('NGram.KANJI_3_12'),
messages.get_string('NGram.KANJI_3_13'),
messages.get_string('NGram.KANJI_3_15'),
messages.get_string('NGram.KANJI_3_16'),
messages.get_string('NGram.KANJI_3_18'),
messages.get_string('NGram.KANJI_3_19'),
messages.get_string('NGram.KANJI_3_22'),
messages.get_string('NGram.KANJI_3_23'),
messages.get_string('NGram.KANJI_3_27'),
messages.get_string('NGram.KANJI_3_29'),
messages.get_string('NGram.KANJI_3_30'),
messages.get_string('NGram.KANJI_3_31'),
messages.get_string('NGram.KANJI_3_32'),
messages.get_string('NGram.KANJI_3_35'),
messages.get_string('NGram.KANJI_3_36'),
messages.get_string('NGram.KANJI_3_37'),
messages.get_string('NGram.KANJI_3_38'),
messages.get_string('NGram.KANJI_4_0'),
messages.get_string('NGram.KANJI_4_9'),
messages.get_string('NGram.KANJI_4_10'),
messages.get_string('NGram.KANJI_4_16'),
messages.get_string('NGram.KANJI_4_17'),
messages.get_string('NGram.KANJI_4_18'),
messages.get_string('NGram.KANJI_4_22'),
messages.get_string('NGram.KANJI_4_24'),
messages.get_string('NGram.KANJI_4_28'),
messages.get_string('NGram.KANJI_4_34'),
messages.get_string('NGram.KANJI_4_39'),
messages.get_string('NGram.KANJI_5_10'),
messages.get_string('NGram.KANJI_5_11'),
messages.get_string('NGram.KANJI_5_12'),
messages.get_string('NGram.KANJI_5_13'),
messages.get_string('NGram.KANJI_5_14'),
messages.get_string('NGram.KANJI_5_18'),
messages.get_string('NGram.KANJI_5_26'),
messages.get_string('NGram.KANJI_5_29'),
messages.get_string('NGram.KANJI_5_34'),
messages.get_string('NGram.KANJI_5_39'),
messages.get_string('NGram.KANJI_6_0'),
messages.get_string('NGram.KANJI_6_3'),
messages.get_string('NGram.KANJI_6_9'),
messages.get_string('NGram.KANJI_6_10'),
messages.get_string('NGram.KANJI_6_11'),
messages.get_string('NGram.KANJI_6_12'),
messages.get_string('NGram.KANJI_6_16'),
messages.get_string('NGram.KANJI_6_18'),
messages.get_string('NGram.KANJI_6_20'),
messages.get_string('NGram.KANJI_6_21'),
messages.get_string('NGram.KANJI_6_22'),
messages.get_string('NGram.KANJI_6_23'),
messages.get_string('NGram.KANJI_6_25'),
messages.get_string('NGram.KANJI_6_28'),
messages.get_string('NGram.KANJI_6_29'),
messages.get_string('NGram.KANJI_6_30'),
messages.get_string('NGram.KANJI_6_32'),
messages.get_string('NGram.KANJI_6_34'),
messages.get_string('NGram.KANJI_6_35'),
messages.get_string('NGram.KANJI_6_37'),
messages.get_string('NGram.KANJI_6_39'),
messages.get_string('NGram.KANJI_7_0'),
messages.get_string('NGram.KANJI_7_3'),
messages.get_string('NGram.KANJI_7_6'),
messages.get_string('NGram.KANJI_7_7'),
messages.get_string('NGram.KANJI_7_9'),
messages.get_string('NGram.KANJI_7_11'),
messages.get_string('NGram.KANJI_7_12'),
messages.get_string('NGram.KANJI_7_13'),
messages.get_string('NGram.KANJI_7_16'),
messages.get_string('NGram.KANJI_7_18'),
messages.get_string('NGram.KANJI_7_19'),
messages.get_string('NGram.KANJI_7_20'),
messages.get_string('NGram.KANJI_7_21'),
messages.get_string('NGram.KANJI_7_23'),
messages.get_string('NGram.KANJI_7_25'),
messages.get_string('NGram.KANJI_7_28'),
messages.get_string('NGram.KANJI_7_29'),
messages.get_string('NGram.KANJI_7_32'),
messages.get_string('NGram.KANJI_7_33'),
messages.get_string('NGram.KANJI_7_35'),
messages.get_string('NGram.KANJI_7_37')]
CJK_MAP = {}
@classmethod
def _init_cjk_map(cls):
for cjk_list in cls.CJK_CLASS:
representative = cjk_list[0]
for ch in cjk_list:
cls.CJK_MAP[ch] = representative
NGram._init_cjk_map()
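# Illustrative note (not part of the original module): after _init_cjk_map()
# runs at import time, CJK_MAP sends every character of a CJK class to that
# class's first (representative) character, so a caller can normalise kanji
# with something like:
#
#   representative = NGram.CJK_MAP.get(ch, ch)  # fall back to ch itself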
|
|
__author__ = 'rensholmer'
from gffsubpart import GffSubPart
from itertools import groupby #groupby for fasta parsing
import pprint
from intervaltree import IntervalTree, Interval
class Gff(object):
"""
Work in progress: holds GffSubPart objects
"""
_combos = [{'gene':{'mRNA':['CDS','exon','five_prime_UTR','three_prime_UTR']}},
{'match':'match_part'},
{'protein_match':'match_part'}]
_featuretypes = ('gene','mRNA','CDS','exon','five_prime_UTR','three_prime_UTR',
'match','protein_match','transcript_match','match_part',
'biological_region','polypeptide')
def __init__(self,*args,**kwargs):
"""
Fire it up!
"""
self.features = {} #dict with {_key1:feature1,_key2:feature2,...} OLD:{seqid:[GffSubPart1,GffSubPart2,etc]}
self.seq = {} #sequencedict with {header:seq}
self._removedkeys = set()
self._uniqueID = 0 #unique IDs for subfeature._key
self.filename = kwargs.get('filename','')
self.name_index = {} #dict with {ID:set(_key1,_key2,),..} to access features based on non unique ID
self.position_index = {}#Intervaltree
self.type_index = {l:set() for l in self._featuretypes} #dict with {featuretype:set(uniqueID1,uniqueID2,),...} to access features based on featuretype
def __iter__(self):
"""
Iterate over all values
"""
for item in self.features.values():
yield item
def __getitem__(self,key):
"""
Allow square bracket access based on ID, like this: gff[ID]
"""
if not isinstance(key,basestring):
e = 'Object of type {0} is not a valid key'.format(type(key))
raise TypeError(e)
for uniqueID in self.name_index[key]:
yield self.features[uniqueID]
def __len__(self):
return len(self.features.values())
def __str__(self):
return self.filename #temporary
def __repr__(self):
return self.__str__()
def __add__(self,other):
"""
Add two Gff objects together
"""
new_gff = Gff()
for self_obj in self:
new_gff.update(self_obj)
for other_obj in other:
new_gff.update(other_obj)
#new_gff.set_children()
return new_gff
def split(self):
for seqid in self.position_index:
new_gff = Gff()
new_gff.seq[seqid] = self.seq[seqid]
for sub in self.getitems(seqid=seqid):
new_gff.update(sub)
yield new_gff
@property
def uniqueID(self):
self._uniqueID += 1
return self._uniqueID
def typecounts(self):
"""
Args:
None
Returns:
Dictionary with counts per GffSubPart type
"""
return {k:len(v) for k,v in self.type_index.iteritems() if len(v) > 0}
def stringify(self):
"""
Args:
None
Returns:
Entire Gff object as gff formatted string
"""
string = []
for sub in self:
string.append(sub.stringify())
return ''.join(string)
def getclosest(self, seqid, pos, strand = None, featuretype = None):
"""
Args:
seqid: Scaffold name (string) [REQUIRED]
pos: Position on scaffold (int) [REQUIRED]
strand: search on positive or negative strand or both ('+','-' or None for both). Default is both
featuretype: returned featuretypes (string or list of strings)
Returns:
GffSubPart closest to given position on given scaffold
"""
features = self.getitems(seqid = seqid, strand = strand, featuretype = featuretype)
sorted_features = sorted(features, key = lambda x: x.start)
if not sorted_features:
return None
if len(sorted_features) == 1:
return sorted_features[0]
for i,feature in enumerate(sorted_features[:-1]):
next_feature = sorted_features[i+1]
if pos >= feature.end and pos <= next_feature.start:
break
if pos - feature.end < next_feature.start - pos:
closest = feature
else:
closest = next_feature
return closest
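# Illustrative usage sketch (hypothetical names): find the mRNA nearest to
# position 12345 on scaffold_1, restricted to the plus strand:
#
#   closest = gff.getclosest('scaffold_1', 12345, strand='+', featuretype='mRNA')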
def getitems(self, seqid = None, start = None, end = None, strand = None, featuretype = None):
"""
Args:
seqid: Scaffold name (string)
start: leftbound position (int)
end: rightbound position (int)
strand: search on positive or negative strand or both ('+','-' or None for both). Default is both
featuretype: returned featuretypes (string or list of strings)
Returns:
Generator object containing all GffSubParts within the given specs (interval, featuretype)
"""
coords = (start,end)
if coords.count(None) == 1:
e = '({0},{1}) are not valid coordinates'.format(*coords)
raise Exception(e)
if featuretype != None:
if isinstance(featuretype,basestring) and featuretype in self._featuretypes:
featuretype = [featuretype]
elif isinstance(featuretype,(list,tuple)):
pass
else:
e = '{0} is not a valid type for featuretype'.format(type(featuretype))
raise TypeError(e)
if seqid == None:
if coords.count(None) != 2:
e = 'Can not provide start/end coordinates when no seqid is provided'
raise NotImplementedError(e)
elif strand != None:
e = 'Can not provide strand when no seqid is provided'
raise NotImplementedError(e)
else:
seqids = self.position_index.keys()
elif not isinstance(seqid,basestring):
e = '{0} is not a valid type for seqid'.format(type(seqid))
raise TypeError(e)
else:
seqids = [seqid]
for seqid in seqids:
if seqid not in self.position_index:
continue #False
if start == None and end == None:
subs = (self.features[_key] for _key in (interval.data['_key'] for interval in self.position_index[seqid].items()) if _key not in self._removedkeys)
else:
subs = (self.features[_key] for _key in (interval.data['_key'] for interval in self.position_index[seqid].search(start,end)) if _key not in self._removedkeys)
for sub in subs:
if (featuretype == None or sub.featuretype in featuretype) and (strand == None or sub.strand == strand) and not (sub.start == end or sub.end == start):
yield sub
return
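# Illustrative usage sketch (assuming a populated Gff instance `gff`):
#
#   for mrna in gff.getitems(seqid='scaffold_1', start=1000, end=5000, featuretype='mRNA'):
#       print mrna.ID, mrna.start, mrna.end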
def getseq(self,feature=None,subfeaturetype=None,topfeaturetype=None):
"""
This is replaced by a combination of properties on GffSubPart: seq,pep and siblings
"""
if isinstance(feature,basestring):
features = self[feature]
elif isinstance(feature,GffSubPart):
features = [feature]
elif feature == None:
features = self.getitems(featuretype=topfeaturetype)
else:
raise TypeError('feature is not of type GffSubPart, or String')
for feature in features:
if feature.seq:
feature.seq = ''
if feature.strand == '+':
reverse = False
else:
reverse = True
children = self.get_children(feature,featuretype=subfeaturetype)
children = sorted([c for c in children],key = lambda x: x.get_start(),reverse=reverse)
for index,cds in enumerate(children):
#if cds.end - cds.start == 1 and this is the last cds, only select one nucleotide, otherwise two
if cds.start + 1 == cds.end and index == len(children) - 1:
cds.seq = self.seq[cds.seqid][cds.start-1]
else:
cds.seq = self.seq[cds.seqid][cds.start-1:cds.end]
if reverse:
cds._revcomp()
#print cds.seq
feature.seq += cds.seq
def remove(self,key,nested=True):
"""
Args:
key: string or GffSubPart or list
nested: bool
Returns:
None
"""
if isinstance(key,basestring):
keys = self[key]
elif isinstance(key,GffSubPart):
keys = [key]
elif hasattr(key,'__iter__'):#isinstance(key,(list,tuple)):
keys = []
for k in key:
if isinstance(k,basestring):
keys += [x for x in self[k]]
elif isinstance(k,GffSubPart):
keys.append(k)
else:
e = '{0} is not a valid type'.format(k)
raise TypeError(e)
else:
e = '{0} is not a valid type'.format(key)
raise TypeError(e)
for key in keys:
if nested:
nestedkeys = list(self.get_children(key,reverse=True))
else:
nestedkeys = [key]
remove_parents = set()
for nestedkey in nestedkeys:
self.type_index[nestedkey.featuretype].remove(nestedkey._key)
self.position_index[nestedkey.seqid].discardi(nestedkey.start,nestedkey.end,{'_key':nestedkey._key})
#self.position_index[k.seqid].remove(Interval(k.start,k.end,{'_key':k._key}))
self.name_index[nestedkey.ID].remove(nestedkey._key)
if len(self.name_index[nestedkey.ID]) == 0:
del self.name_index[nestedkey.ID]
parents = {pp for p in nestedkey.parents for pp in self[p]}
for p in parents:
if nestedkey.ID not in self.name_index:
p.children.remove(nestedkey.ID)
if not p.children and p not in nestedkeys:
remove_parents.add(p)
del self.features[nestedkey._key]
self._removedkeys.add(nestedkey._key)
if remove_parents:
self.remove(list(remove_parents),nested=False)
return True
def update(self,subfeature):
"""
:param subfeature: GffSubPart object
:return:
"""
if not isinstance(subfeature,GffSubPart):
raise NotImplementedError()
ID = self.uniqueID
subfeature._key = ID
self.features[ID] = subfeature
self.name_index.setdefault(subfeature.ID,set()).add(ID)
self.type_index[subfeature.featuretype].add(ID)
interval = Interval(subfeature._start,subfeature.end,{'_key':ID})
self.position_index.setdefault(subfeature.seqid,IntervalTree()).add(interval)
subfeature.container = self
def set_children(self):
"""
Sets the children attribute of all subfeatures
:return:
"""
for sub in self:
for p_name in sub.parents:
for p_obj in self[p_name]:
if sub.ID not in p_obj.children:
p_obj.children.append(sub.ID)
def get_parents(self,key,reverse=True,featuretype=None):
"""
"""
pass
def get_children(self,key,reverse=False,featuretype=None,seen=None):
"""
:param key: subfeature ID or subfeature object
:param reverse: reverses return order, i.e. reverse=True returns CDS->mRNA->gene; reverse=False returns gene->mRNA->CDS
:param featuretype: string or list with featuretypes to be returned
:return: nested generator of subfeature objects
TODO: add something that prevents double yields
"""
if seen == None:
seen = set()
if isinstance(key,GffSubPart):
keys = [key]
elif isinstance(key,basestring):
keys = [k for k in self[key]]
elif isinstance(key,(list,tuple)):
keys = []
for k in key:
if isinstance(k,GffSubPart):
keys.append(k)
elif isinstance(k,basestring):
keys += [x for x in self[k]]
else:
e = '{0} is not a valid key for Gff.get_children()'.format(k)
raise TypeError(e)
else:
print type(key),key
e = '{0} is not a valid key for Gff.get_children()'.format(key)
raise TypeError(e)
if featuretype != None:
if isinstance(featuretype,basestring):
if featuretype in self._featuretypes:
featuretype = [featuretype]
else:
e = '{0} is not a valid featuretype'.format(featuretype)
raise TypeError(e)
elif isinstance(featuretype,(list,tuple)):
pass
else:
e = '{0} is not a valid type for featuretype'.format(type(featuretype))
raise TypeError(e)
#print [s.ID for s in seen]
for k in keys:
if k._key in self._removedkeys:
continue
if not reverse and (featuretype == None or k.featuretype in featuretype) and k not in seen:
seen.add(k)
yield k
for child in k.children:
for nested_child in self.get_children(child,seen=seen):
if (featuretype == None or nested_child.featuretype in featuretype) and nested_child not in seen:
seen.add(nested_child)
yield nested_child
if reverse and (featuretype == None or k.featuretype in featuretype) and k not in seen:
seen.add(k)
yield k
def add_fasta(self,filename):
"""
:param filename: path to a fasta formatted DNA sequence file
:return:
"""
with open(filename,'rU') as filehandle:
faiter = (x[1] for x in groupby(filehandle, lambda line: line[0] == '>'))
for header in faiter:
header = header.next()[1:].strip().split()[0]
seq = ''.join(s.strip() for s in faiter.next())
self.seq[header] = seq
def write_tbl(self):
"""
Args: None
Return:
.tbl formatted string
"""
dic = {}
for x in self.getitems(featuretype='gene'):
string = ''
for y in self.get_children(x,featuretype=['gene','mRNA','CDS']):
string += y.stringify(filetype='tbl') + '\n'
dic.setdefault(x.seqid,[]).append(string)
#print string
for s in dic:
print '>Feature {0}'.format(s)
for t in dic[s]:
print t
#for uniqueID in self.type_index['gene']:
# gene = self.features[uniqueID]
# print gene.stringify(filetype='tbl')
def _range_map(self,subfeature):
"""
:param subfeature: GffSubPart object with children
:return: dictionary with {subfeature.seq.coordinate : scaffold.seq.coordinate}
"""
pos = 0
cds = []
range_map = {}
children = self.get_children(subfeature,featuretype='CDS')
if subfeature.strand == '+':
reverse = False
step = 1
else:
reverse = True
step = -1
children = sorted((c for c in children),key = lambda x:x.start,reverse=reverse)
for c in children:
mrna_range = range(pos,pos+c.end-c.start+1)
genome_range = range(c.get_start(),c.get_end()+step,step)
assert len(mrna_range) == len(genome_range)
x = {a:b for a,b in zip(mrna_range,genome_range)}
range_map.update(x)
pos += c.end - c.start + 1
range_map[max(range_map) + 1] = range_map[max(range_map)] + step
return range_map
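# Illustrative worked example: for a forward-strand feature with CDS parts at
# 101-103 and 201-203, _range_map() returns
# {0: 101, 1: 102, 2: 103, 3: 201, 4: 202, 5: 203, 6: 204},
# i.e. spliced-transcript coordinates keyed to genome coordinates, with one
# extra position past the end of the last CDS.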
def _change_cds(self,subfeature,genome_orf):
"""
:param self:
:param subfeature: subfeature with CDS children
:param orf: tuple with genome start and genome stop of ORF in subfeature mRNA
:return: True if successful change else False
"""
new_start = genome_orf[0]
new_stop = genome_orf[1]
forward = subfeature.strand == '+'
reverse = subfeature.strand == '-'
remove = set()
for cds in self.get_children(subfeature,featuretype='CDS'):
found_start = False
found_stop = False
if cds.end < new_start or cds.start > new_stop:
#print 'remove'
remove.add(cds)
continue
found_start = cds.start <= new_start <= cds.end
found_stop = cds.start <= new_stop <= cds.end
if found_start:
print 'found start, stop ==',found_stop
if found_stop:
if reverse:
new_start += 1
cds.start = new_start
cds.phase = 0
if found_stop:
print 'found stop, start ==',found_start
if found_start:
if forward:
new_stop -= 1
cds.end = new_stop
'''
children = self.get_children(subfeature,featuretype='CDS')
forward = subfeature.strand == '+'
if forward:
new_start = genome_orf[0]
new_end = genome_orf[1] #- 1
children = sorted(children,key=lambda x: x.get_start())
else:
new_start = genome_orf[1]
new_end = genome_orf[0] #+ 1
children = sorted(children,key=lambda x: x.get_start(), reverse=True)
print 'GENOME ORF',genome_orf
for c in children:
old_start = c.get_start()
old_end = c.get_end()
forward_start = old_start <= new_start <= old_end
reverse_start = old_start >= new_start >= old_end
forward_stop = old_start <= new_end <= old_end
reverse_stop = old_start >= new_end >= old_end
#print '---'
#change start
if (forward and forward_start) or (not forward and reverse_start) and not found_stop:
print 'start',old_start,(new_start,old_end)
found_start = True
#fix problem with exons of length 1
if abs(new_start - old_end) == 1:
# if forward:
# pass
#new_start -= 1
#c.set_end(old_end + 1)
print 'this just happened',old_start,(new_start,old_end)
#if old_end == new_start:
# if forward:
# pass
# #new_start -= 1
# else:
# pass
# #new_start += 1
# print 'this just happened',old_start,(new_start,old_end)
c.set_start(new_start)
#change stop and continue loop from the top
if (forward and forward_stop) or (not forward and reverse_stop) and found_start and not found_stop:
print 'end',(old_start,new_end),old_end
found_stop = True
#fix problem if start and stop are in the same exon
if forward_start:
new_end -= 1
if reverse_start:
new_end += 1
#if abs(old_start - old_end) == 1:
# if forward:
# pass
#new_end += 1
# else:
# pass
#new_end -= 1
#remove exons of length 0
if old_start == new_end:
remove.append(c)
else:
c.set_end(new_end)
continue
#remove exon before start
if not found_start:
remove.append(c)
#remove exon after stop
if found_stop and found_start:
remove.append(c)
'''
#remove non coding exons
print [x.ID for x in remove]
if remove:
self.remove(remove,nested=False)
#recalculate transcript coding sequence
self.getseq(subfeature,topfeaturetype=subfeature.featuretype,subfeaturetype='CDS')
#assertions to make sure ORF is fixed
assert len(subfeature.seq) >= 6,(subfeature.ID,len(subfeature.seq),subfeature.seq)
assert len(subfeature.seq) % 3 == 0,(subfeature.ID,subfeature.strand,len(subfeature.seq),subfeature.seq)
assert subfeature.seq[:3] in ('CTG','ATG'),(subfeature.ID,subfeature.seq[:3],subfeature.seq)
assert subfeature.seq[-3:] in ('TGA','TAA','TAG')
assert '*' not in subfeature.pep[-1],(subfeature.ID,subfeature.pep)
return True
def fix_orf(self,subfeature,starts=('ATG','CTG'),stops=('TAA','TGA','TAG'),min_len=6):
"""
Finds longest ORF in spliced transcript.
Args:
subfeature: class: GffSubPart()
starts: list/tuple with start codons
stops: list/tuple with stop codons
min_len: minimum ORF length
Returns:
True if ORF is found
"""
orf = subfeature._find_orf()
if not orf:
return False
if max(orf) - min(orf) < min_len:
return False
range_map = self._range_map(subfeature)
if not range_map:
return False
#print orf
#print range_map
genome_orf = range_map[orf[0]],range_map[orf[1]] #self._map_start_stop(orf,range_map)
#print genome_orf
if subfeature.strand == '-':
genome_orf = (genome_orf[1],genome_orf[0])
if not genome_orf:
return False
change = self._change_cds(subfeature,genome_orf)
return change
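# Illustrative end-to-end sketch (hypothetical filenames; assumes `gff` has
# already been populated by the package's GFF parser, which lives elsewhere):
#
#   gff.add_fasta('genome.fasta')
#   for mrna in gff.getitems(featuretype='mRNA'):
#       gff.fix_orf(mrna)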
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gym.spaces import Tuple, Discrete, Dict
import os
import logging
import numpy as np
import torch as th
import torch.nn as nn
from torch.optim import RMSprop
from torch.distributions import Categorical
import ray
from ray.rllib.agents.qmix.mixers import VDNMixer, QMixer
from ray.rllib.agents.qmix.model import RNNModel, _get_size
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.policy.policy import TupleActions, Policy
from ray.rllib.policy.rnn_sequencing import chop_into_sequences
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.model import _unpack_obs
from ray.rllib.env.constants import GROUP_REWARDS
from ray.rllib.utils.annotations import override
logger = logging.getLogger(__name__)
# if the obs space is Dict type, look for the global state under this key
ENV_STATE = "state"
class QMixLoss(nn.Module):
def __init__(self,
model,
target_model,
mixer,
target_mixer,
n_agents,
n_actions,
double_q=True,
gamma=0.99):
nn.Module.__init__(self)
self.model = model
self.target_model = target_model
self.mixer = mixer
self.target_mixer = target_mixer
self.n_agents = n_agents
self.n_actions = n_actions
self.double_q = double_q
self.gamma = gamma
def forward(self,
rewards,
actions,
terminated,
mask,
obs,
next_obs,
action_mask,
next_action_mask,
state=None,
next_state=None):
"""Forward pass of the loss.
Arguments:
rewards: Tensor of shape [B, T, n_agents]
actions: Tensor of shape [B, T, n_agents]
terminated: Tensor of shape [B, T, n_agents]
mask: Tensor of shape [B, T, n_agents]
obs: Tensor of shape [B, T, n_agents, obs_size]
next_obs: Tensor of shape [B, T, n_agents, obs_size]
action_mask: Tensor of shape [B, T, n_agents, n_actions]
next_action_mask: Tensor of shape [B, T, n_agents, n_actions]
state: Tensor of shape [B, T, state_dim] (optional)
next_state: Tensor of shape [B, T, state_dim] (optional)
"""
# Assert either none or both of state and next_state are given
if state is None and next_state is None:
state = obs # default to state being all agents' observations
next_state = next_obs
elif (state is None) != (next_state is None):
raise ValueError("Expected either neither or both of `state` and "
"`next_state` to be given. Got: "
"\n`state` = {}\n`next_state` = {}".format(
state, next_state))
# Calculate estimated Q-Values
mac_out = _unroll_mac(self.model, obs)
# Pick the Q-Values for the actions taken -> [B * n_agents, T]
chosen_action_qvals = th.gather(
mac_out, dim=3, index=actions.unsqueeze(3)).squeeze(3)
# Calculate the Q-Values necessary for the target
target_mac_out = _unroll_mac(self.target_model, next_obs)
# Mask out unavailable actions for the t+1 step
ignore_action_tp1 = (next_action_mask == 0) & (mask == 1).unsqueeze(-1)
target_mac_out[ignore_action_tp1] = -np.inf
# Max over target Q-Values
if self.double_q:
# Double Q learning computes the target Q values by selecting the
# t+1 timestep action according to the "policy" neural network and
# then estimating the Q-value of that action with the "target"
# neural network
# Compute the t+1 Q-values to be used in action selection
# using next_obs
mac_out_tp1 = _unroll_mac(self.model, next_obs)
# mask out unallowed actions
mac_out_tp1[ignore_action_tp1] = -np.inf
# obtain best actions at t+1 according to policy NN
cur_max_actions = mac_out_tp1.argmax(dim=3, keepdim=True)
# use the target network to estimate the Q-values of policy
# network's selected actions
target_max_qvals = th.gather(target_mac_out, 3,
cur_max_actions).squeeze(3)
else:
target_max_qvals = target_mac_out.max(dim=3)[0]
assert target_max_qvals.min().item() != -np.inf, \
"target_max_qvals contains a masked action; \
there may be a state with no valid actions."
# Mix
if self.mixer is not None:
chosen_action_qvals = self.mixer(chosen_action_qvals, state)
target_max_qvals = self.target_mixer(target_max_qvals, next_state)
# Calculate 1-step Q-Learning targets
targets = rewards + self.gamma * (1 - terminated) * target_max_qvals
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
loss = (masked_td_error**2).sum() / mask.sum()
return loss, mask, masked_td_error, chosen_action_qvals, targets
class QMixTorchPolicy(Policy):
"""QMix impl. Assumes homogeneous agents for now.
You must use MultiAgentEnv.with_agent_groups() to group agents
together for QMix. This creates the proper Tuple obs/action spaces and
populates the '_group_rewards' info field.
Action masking: to specify an action mask for individual agents, use a
dict space with an action_mask key, e.g. {"obs": ob, "action_mask": mask}.
The mask space must be `Box(0, 1, (n_actions,))`.
"""
def __init__(self, obs_space, action_space, config):
_validate(obs_space, action_space)
config = dict(ray.rllib.agents.qmix.qmix.DEFAULT_CONFIG, **config)
self.config = config
self.observation_space = obs_space
self.action_space = action_space
self.n_agents = len(obs_space.original_space.spaces)
self.n_actions = action_space.spaces[0].n
self.h_size = config["model"]["lstm_cell_size"]
self.has_env_global_state = False
self.has_action_mask = False
self.device = (th.device("cuda")
if bool(os.environ.get("CUDA_VISIBLE_DEVICES", None))
else th.device("cpu"))
agent_obs_space = obs_space.original_space.spaces[0]
if isinstance(agent_obs_space, Dict):
space_keys = set(agent_obs_space.spaces.keys())
if "obs" not in space_keys:
raise ValueError(
"Dict obs space must have subspace labeled `obs`")
self.obs_size = _get_size(agent_obs_space.spaces["obs"])
if "action_mask" in space_keys:
mask_shape = tuple(agent_obs_space.spaces["action_mask"].shape)
if mask_shape != (self.n_actions, ):
raise ValueError(
"Action mask shape must be {}, got {}".format(
(self.n_actions, ), mask_shape))
self.has_action_mask = True
if ENV_STATE in space_keys:
self.env_global_state_shape = _get_size(
agent_obs_space.spaces[ENV_STATE])
self.has_env_global_state = True
else:
self.env_global_state_shape = (self.obs_size, self.n_agents)
# The real agent obs space is nested inside the dict
config["model"]["full_obs_space"] = agent_obs_space
agent_obs_space = agent_obs_space.spaces["obs"]
else:
self.obs_size = _get_size(agent_obs_space)
self.model = ModelCatalog.get_model_v2(
agent_obs_space,
action_space.spaces[0],
self.n_actions,
config["model"],
framework="torch",
name="model",
default_model=RNNModel).to(self.device)
self.target_model = ModelCatalog.get_model_v2(
agent_obs_space,
action_space.spaces[0],
self.n_actions,
config["model"],
framework="torch",
name="target_model",
default_model=RNNModel).to(self.device)
# Setup the mixer network.
if config["mixer"] is None:
self.mixer = None
self.target_mixer = None
elif config["mixer"] == "qmix":
self.mixer = QMixer(self.n_agents, self.env_global_state_shape,
config["mixing_embed_dim"]).to(self.device)
self.target_mixer = QMixer(
self.n_agents, self.env_global_state_shape,
config["mixing_embed_dim"]).to(self.device)
elif config["mixer"] == "vdn":
self.mixer = VDNMixer().to(self.device)
self.target_mixer = VDNMixer().to(self.device)
else:
raise ValueError("Unknown mixer type {}".format(config["mixer"]))
self.cur_epsilon = 1.0
self.update_target() # initial sync
# Setup optimizer
self.params = list(self.model.parameters())
if self.mixer:
self.params += list(self.mixer.parameters())
self.loss = QMixLoss(self.model, self.target_model, self.mixer,
self.target_mixer, self.n_agents, self.n_actions,
self.config["double_q"], self.config["gamma"])
self.optimiser = RMSprop(
params=self.params,
lr=config["lr"],
alpha=config["optim_alpha"],
eps=config["optim_eps"])
@override(Policy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
**kwargs):
obs_batch, action_mask, _ = self._unpack_observation(obs_batch)
# We need to ensure we do not use the env global state
# to compute actions
# Compute actions
with th.no_grad():
q_values, hiddens = _mac(
self.model,
th.as_tensor(obs_batch, dtype=th.float, device=self.device), [
th.as_tensor(
np.array(s), dtype=th.float, device=self.device)
for s in state_batches
])
avail = th.as_tensor(
action_mask, dtype=th.float, device=self.device)
masked_q_values = q_values.clone()
masked_q_values[avail == 0.0] = -float("inf")
# epsilon-greedy action selector
random_numbers = th.rand_like(q_values[:, :, 0])
pick_random = (random_numbers < self.cur_epsilon).long()
random_actions = Categorical(avail).sample().long()
actions = (pick_random * random_actions +
(1 - pick_random) * masked_q_values.argmax(dim=2))
actions = actions.cpu().numpy()
hiddens = [s.cpu().numpy() for s in hiddens]
return TupleActions(list(actions.transpose([1, 0]))), hiddens, {}
@override(Policy)
def learn_on_batch(self, samples):
obs_batch, action_mask, env_global_state = self._unpack_observation(
samples[SampleBatch.CUR_OBS])
(next_obs_batch, next_action_mask,
next_env_global_state) = self._unpack_observation(
samples[SampleBatch.NEXT_OBS])
group_rewards = self._get_group_rewards(samples[SampleBatch.INFOS])
input_list = [
group_rewards, action_mask, next_action_mask,
samples[SampleBatch.ACTIONS], samples[SampleBatch.DONES],
obs_batch, next_obs_batch
]
if self.has_env_global_state:
input_list.extend([env_global_state, next_env_global_state])
output_list, _, seq_lens = \
chop_into_sequences(
samples[SampleBatch.EPS_ID],
samples[SampleBatch.UNROLL_ID],
samples[SampleBatch.AGENT_INDEX],
input_list,
[], # RNN states not used here
max_seq_len=self.config["model"]["max_seq_len"],
dynamic_max=True)
# These will be padded to shape [B * T, ...]
if self.has_env_global_state:
(rew, action_mask, next_action_mask, act, dones, obs, next_obs,
env_global_state, next_env_global_state) = output_list
else:
(rew, action_mask, next_action_mask, act, dones, obs,
next_obs) = output_list
B, T = len(seq_lens), max(seq_lens)
def to_batches(arr, dtype):
new_shape = [B, T] + list(arr.shape[1:])
return th.as_tensor(
np.reshape(arr, new_shape), dtype=dtype, device=self.device)
rewards = to_batches(rew, th.float)
actions = to_batches(act, th.long)
obs = to_batches(obs, th.float).reshape(
[B, T, self.n_agents, self.obs_size])
action_mask = to_batches(action_mask, th.float)
next_obs = to_batches(next_obs, th.float).reshape(
[B, T, self.n_agents, self.obs_size])
next_action_mask = to_batches(next_action_mask, th.float)
if self.has_env_global_state:
env_global_state = to_batches(env_global_state, th.float)
next_env_global_state = to_batches(next_env_global_state, th.float)
# TODO(ekl) this treats group termination as individual termination
terminated = to_batches(dones, th.float).unsqueeze(2).expand(
B, T, self.n_agents)
# Create mask for where index is < unpadded sequence length
filled = np.reshape(
np.tile(np.arange(T, dtype=np.float32), B),
[B, T]) < np.expand_dims(seq_lens, 1)
mask = th.as_tensor(
filled, dtype=th.float, device=self.device).unsqueeze(2).expand(
B, T, self.n_agents)
# Compute loss
loss_out, mask, masked_td_error, chosen_action_qvals, targets = (
self.loss(rewards, actions, terminated, mask, obs, next_obs,
action_mask, next_action_mask, env_global_state,
next_env_global_state))
# Optimise
self.optimiser.zero_grad()
loss_out.backward()
grad_norm = th.nn.utils.clip_grad_norm_(
self.params, self.config["grad_norm_clipping"])
self.optimiser.step()
mask_elems = mask.sum().item()
stats = {
"loss": loss_out.item(),
"grad_norm": grad_norm
if isinstance(grad_norm, float) else grad_norm.item(),
"td_error_abs": masked_td_error.abs().sum().item() / mask_elems,
"q_taken_mean": (chosen_action_qvals * mask).sum().item() /
mask_elems,
"target_mean": (targets * mask).sum().item() / mask_elems,
}
return {LEARNER_STATS_KEY: stats}
@override(Policy)
def get_initial_state(self): # initial RNN state
return [
s.expand([self.n_agents, -1]).cpu().numpy()
for s in self.model.get_initial_state()
]
@override(Policy)
def get_weights(self):
return {
"model": self._cpu_dict(self.model.state_dict()),
"target_model": self._cpu_dict(self.target_model.state_dict()),
"mixer": self._cpu_dict(self.mixer.state_dict())
if self.mixer else None,
"target_mixer": self._cpu_dict(self.target_mixer.state_dict())
if self.mixer else None,
}
@override(Policy)
def set_weights(self, weights):
self.model.load_state_dict(self._device_dict(weights["model"]))
self.target_model.load_state_dict(
self._device_dict(weights["target_model"]))
if weights["mixer"] is not None:
self.mixer.load_state_dict(self._device_dict(weights["mixer"]))
self.target_mixer.load_state_dict(
self._device_dict(weights["target_mixer"]))
@override(Policy)
def get_state(self):
state = self.get_weights()
state["cur_epsilon"] = self.cur_epsilon
return state
@override(Policy)
def set_state(self, state):
self.set_weights(state)
self.set_epsilon(state["cur_epsilon"])
def update_target(self):
self.target_model.load_state_dict(self.model.state_dict())
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
logger.debug("Updated target networks")
def set_epsilon(self, epsilon):
self.cur_epsilon = epsilon
def _get_group_rewards(self, info_batch):
group_rewards = np.array([
info.get(GROUP_REWARDS, [0.0] * self.n_agents)
for info in info_batch
])
return group_rewards
def _device_dict(self, state_dict):
return {
k: th.as_tensor(v, device=self.device)
for k, v in state_dict.items()
}
@staticmethod
def _cpu_dict(state_dict):
return {k: v.cpu().detach().numpy() for k, v in state_dict.items()}
def _unpack_observation(self, obs_batch):
"""Unpacks the observation, action mask, and state (if present)
from agent grouping.
Returns:
obs (np.ndarray): obs tensor of shape [B, n_agents, obs_size]
mask (np.ndarray): action mask, if any
state (np.ndarray or None): state tensor of shape [B, state_size]
or None if it is not in the batch
"""
unpacked = _unpack_obs(
np.array(obs_batch, dtype=np.float32),
self.observation_space.original_space,
tensorlib=np)
if self.has_action_mask:
obs = np.concatenate(
[o["obs"] for o in unpacked],
axis=1).reshape([len(obs_batch), self.n_agents, self.obs_size])
action_mask = np.concatenate(
[o["action_mask"] for o in unpacked], axis=1).reshape(
[len(obs_batch), self.n_agents, self.n_actions])
else:
if isinstance(unpacked[0], dict):
unpacked_obs = [u["obs"] for u in unpacked]
else:
unpacked_obs = unpacked
obs = np.concatenate(
unpacked_obs,
axis=1).reshape([len(obs_batch), self.n_agents, self.obs_size])
action_mask = np.ones(
[len(obs_batch), self.n_agents, self.n_actions],
dtype=np.float32)
if self.has_env_global_state:
state = unpacked[0][ENV_STATE]
else:
state = None
return obs, action_mask, state
def _validate(obs_space, action_space):
if not hasattr(obs_space, "original_space") or \
not isinstance(obs_space.original_space, Tuple):
raise ValueError("Obs space must be a Tuple, got {}. Use ".format(
obs_space) + "MultiAgentEnv.with_agent_groups() to group related "
"agents for QMix.")
if not isinstance(action_space, Tuple):
raise ValueError(
"Action space must be a Tuple, got {}. ".format(action_space) +
"Use MultiAgentEnv.with_agent_groups() to group related "
"agents for QMix.")
if not isinstance(action_space.spaces[0], Discrete):
raise ValueError(
"QMix requires a discrete action space, got {}".format(
action_space.spaces[0]))
if len({str(x) for x in obs_space.original_space.spaces}) > 1:
raise ValueError(
"Implementation limitation: observations of grouped agents "
"must be homogeneous, got {}".format(
obs_space.original_space.spaces))
if len({str(x) for x in action_space.spaces}) > 1:
raise ValueError(
"Implementation limitation: action space of grouped agents "
"must be homogeneous, got {}".format(action_space.spaces))
def _mac(model, obs, h):
"""Forward pass of the multi-agent controller.
Arguments:
model: TorchModelV2 class
obs: Tensor of shape [B, n_agents, obs_size]
h: List of tensors of shape [B, n_agents, h_size]
Returns:
q_vals: Tensor of shape [B, n_agents, n_actions]
h: Tensor of shape [B, n_agents, h_size]
"""
B, n_agents = obs.size(0), obs.size(1)
if not isinstance(obs, dict):
obs = {"obs": obs}
obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}
h_flat = [s.reshape([B * n_agents, -1]) for s in h]
q_flat, h_flat = model(obs_agents_as_batches, h_flat, None)
return q_flat.reshape(
[B, n_agents, -1]), [s.reshape([B, n_agents, -1]) for s in h_flat]
def _unroll_mac(model, obs_tensor):
"""Computes the estimated Q values for an entire trajectory batch"""
B = obs_tensor.size(0)
T = obs_tensor.size(1)
n_agents = obs_tensor.size(2)
mac_out = []
h = [s.expand([B, n_agents, -1]) for s in model.get_initial_state()]
for t in range(T):
q, h = _mac(model, obs_tensor[:, t], h)
mac_out.append(q)
mac_out = th.stack(mac_out, dim=1) # Concat over time
return mac_out
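# Illustrative note (hypothetical shapes): for obs_tensor of shape
# [B=32, T=20, n_agents=3, obs_size=10], _unroll_mac() applies _mac() once per
# timestep, threading the RNN state through the loop, and returns Q-values of
# shape [32, 20, 3, n_actions].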
def _drop_agent_dim(T):
shape = list(T.shape)
B, n_agents = shape[0], shape[1]
return T.reshape([B * n_agents] + shape[2:])
def _add_agent_dim(T, n_agents):
shape = list(T.shape)
B = shape[0] // n_agents
assert shape[0] % n_agents == 0
return T.reshape([B, n_agents] + shape[1:])
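# Illustrative sketch (not part of the RLlib module): the two helpers above are
# inverses of each other over the agent dimension, e.g.
#
#   x = th.zeros([4, 3, 7])             # [B, n_agents, obs_size]
#   flat = _drop_agent_dim(x)           # -> shape [12, 7]
#   restored = _add_agent_dim(flat, 3)  # -> shape [4, 3, 7]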
|
|
from pyramid.view import view_config
from pyramid.renderers import render
import deform
import colander
from pyramid.response import Response
import pyramid.httpexceptions as exc
from pyramid.view import (
view_config,
forbidden_view_config,
)
from pyramid.security import (
remember,
forget,
)
from .security import USERS, PASS
from crypt import crypt
def render_form(request,form, appstruct=colander.null, submitted='submit', success=None, readonly=False):
captured = None
if submitted in request.POST:
# the request represents a form submission
try:
# try to validate the submitted values
controls = request.POST.items()
captured = form.validate(controls)
if success:
response = success(captured['text'],captured['lang'])
if response is not None:
return response
html = form.render(captured)
except deform.ValidationFailure as e:
# the submitted values could not be validated
html = e.render()
else:
# the request requires a simple form rendering
html = form.render(appstruct, readonly=readonly)
if request.is_xhr:
return Response(html)
# values passed to template for rendering
return {
'form':html
}
@view_config(route_name='home', renderer='templates/login.jinja2')
@forbidden_view_config(renderer='templates/login.jinja2')
def my_view(request):
login_url = request.route_url('home')
referrer = request.url
if referrer == login_url:
referrer = '/' # never use the login form itself as came_from
came_from = request.params.get('came_from', referrer)
message = ''
login = ''
password = ''
if 'form.submitted' in request.POST:
login = request.POST['login']
password = request.POST['password']
if login in PASS and crypt(password,PASS[login])==PASS[login]:
headers = remember(request, login)
return exc.HTTPFound(location = '/paste',
headers = headers)
message = 'Failed login'
return dict(
message = message,
url = request.application_url + '/',
came_from = came_from,
login = login,
password = password,
)
@view_config(route_name='paste', renderer='templates/paste.jinja2', permission='edit')
def paste(request):
choices = (
('abap','ABAP'),
('ada','Ada'),
('ahk','autohotkey'),
('antlr','ANTLR'),
('antlr-as','ANTLR With ActionScript Target'),
('antlr-cpp','ANTLR With CPP Target'),
('antlr-csharp','ANTLR With C# Target'),
('antlr-java','ANTLR With Java Target'),
('antlr-objc','ANTLR With ObjectiveC Target'),
('antlr-perl','ANTLR With Perl Target'),
('antlr-python','ANTLR With Python Target'),
('antlr-ruby','ANTLR With Ruby Target'),
('apacheconf','ApacheConf'),
('applescript','AppleScript'),
('as','ActionScript'),
('as3','ActionScript 3'),
('aspectj','AspectJ'),
('aspx-cs','aspx-cs'),
('aspx-vb','aspx-vb'),
('asy','Asymptote'),
('autoit','AutoIt'),
('awk','Awk'),
('basemake','Base Makefile'),
('bash','Bash'),
('bat','Batchfile'),
('bbcode','BBCode'),
('befunge','Befunge'),
('blitzmax','BlitzMax'),
('boo','Boo'),
('brainfuck','Brainfuck'),
('bro','Bro'),
('bugs','BUGS'),
('c','C'),
('ca65','ca65'),
('cbmbas','CBM BASIC V2'),
('ceylon','Ceylon'),
('cfengine3','CFEngine3'),
('cfm','Coldfusion HTML'),
('cfs','cfstatement'),
('cheetah','Cheetah'),
('Clipper','FoxPro'),
('clojure','Clojure'),
('cmake','CMake'),
('c-objdump','c-objdump'),
('cobol','COBOL'),
('cobolfree','COBOLFree'),
('coffee-script','CoffeeScript'),
('common-lisp','Common Lisp'),
('console','Bash Session'),
('control','Debian Control file'),
('coq','Coq'),
('cpp','C++'),
('cpp-objdump','cpp-objdump'),
('croc','Croc'),
('csharp','C#'),
('css','CSS'),
('css+django','CSS+Django/Jinja'),
('css+erb','CSS+Ruby'),
('css+genshitext','CSS+Genshi Text'),
('css+lasso','CSS+Lasso'),
('css+mako','CSS+Mako'),
('css+myghty','CSS+Myghty'),
('css+php','CSS+PHP'),
('css+smarty','CSS+Smarty'),
('Cucumber','Gherkin'),
('cuda','CUDA'),
('cython','Cython'),
('d','D'),
('dart','Dart'),
('delphi','Delphi'),
('dg','dg'),
('diff','Diff'),
('django','Django/Jinja'),
('d-objdump','d-objdump'),
('dpatch','Darcs Patch'),
('dtd','DTD'),
('duel','Duel'),
('dylan','Dylan'),
('dylan-console','Dylan session'),
('dylan-lid','DylanLID'),
('ec','eC'),
('ecl','ECL'),
('elixir','Elixir'),
('erb','ERB'),
('erl','Erlang erl session'),
('erlang','Erlang'),
('evoque','Evoque'),
('factor','Factor'),
('fan','Fantom'),
('fancy','Fancy'),
('felix','Felix'),
('fortran','Fortran'),
('fsharp','FSharp'),
('gas','GAS'),
('genshi','Genshi'),
('genshitext','Genshi Text'),
('glsl','GLSL'),
('gnuplot','Gnuplot'),
('go','Go'),
('gooddata-cl','GoodData-CL'),
('gosu','Gosu'),
('groff','Groff'),
('groovy','Groovy'),
('gst','Gosu Template'),
('haml','Haml'),
('haskell','Haskell'),
('haxeml','Hxml'),
('html','HTML'),
('html+cheetah','HTML+Cheetah'),
('html+django','HTML+Django/Jinja'),
('html+evoque','HTML+Evoque'),
('html+genshi','HTML+Genshi'),
('html+lasso','HTML+Lasso'),
('html+mako','HTML+Mako'),
('html+myghty','HTML+Myghty'),
('html+php','HTML+PHP'),
('html+smarty','HTML+Smarty'),
('html+velocity','HTML+Velocity'),
('http','HTTP'),
('hx','haXe'),
('hybris','Hybris'),
('idl','IDL'),
('iex','Elixir iex session'),
('ini','INI'),
('io','Io'),
('ioke','Ioke'),
('irc','IRC logs'),
('jade','Jade'),
('jags','JAGS'),
('java','Java'),
('jlcon','Julia console'),
('js','JavaScript'),
('js+cheetah','JavaScript+Cheetah'),
('js+django','JavaScript+Django/Jinja'),
('js+erb','JavaScript+Ruby'),
('js+genshitext','JavaScript+Genshi Text'),
('js+lasso','JavaScript+Lasso'),
('js+mako','JavaScript+Mako'),
('js+myghty','JavaScript+Myghty'),
('js+php','JavaScript+PHP'),
('js+smarty','JavaScript+Smarty'),
('json','JSON'),
('jsp','Java Server Page'),
('julia','Julia'),
('kconfig','Kconfig'),
('koka','Koka'),
('kotlin','Kotlin'),
('lasso','Lasso'),
('lhs','Literate Haskell'),
('lighty','Lighttpd configuration file'),
('live-script','LiveScript'),
('llvm','LLVM'),
('logos','Logos'),
('logtalk','Logtalk'),
('lua','Lua'),
('make','Makefile'),
('mako','Mako'),
('maql','MAQL'),
('mason','Mason'),
('matlab','Matlab'),
('matlabsession','Matlab session'),
('minid','MiniD'),
('modelica','Modelica'),
('modula2','Modula-2'),
('monkey','Monkey'),
('moocode','MOOCode'),
('moon','MoonScript'),
('mscgen','Mscgen'),
('mupad','MuPAD'),
('mxml','MXML'),
('myghty','Myghty'),
('mysql','MySQL'),
('nasm','NASM'),
('nemerle','Nemerle'),
('newlisp','NewLisp'),
('newspeak','Newspeak'),
('nginx','Nginx configuration file'),
('nimrod','Nimrod'),
('nsis','NSIS'),
('numpy','NumPy'),
('objdump','objdump'),
('objective-c','Objective-C'),
('objective-c++','Objective-C++'),
('objective-j','Objective-J'),
('ocaml','OCaml'),
('octave','Octave'),
('ooc','Ooc'),
('opa','Opa'),
('openedge','OpenEdge ABL'),
('perl','Perl'),
('php','PHP'),
('plpgsql','PL/pgSQL'),
('postgresql','PostgreSQL SQL dialect'),
('postscript','PostScript'),
('pot','Gettext Catalog'),
('pov','POVRay'),
('powershell','PowerShell'),
('prolog','Prolog'),
('properties','Properties'),
('protobuf','Protocol Buffer'),
('psql','PostgreSQL console (psql)'),
('puppet','Puppet'),
('py3tb','Python 3.0 Traceback'),
('pycon','Python console session'),
('pypylog','PyPy Log'),
('pytb','Python Traceback'),
('python','Python'),
('python3','Python 3'),
('qml','QML'),
('racket','Racket'),
('ragel','Ragel'),
('ragel-c','Ragel in C Host'),
('ragel-cpp','Ragel in CPP Host'),
('ragel-d','Ragel in D Host'),
('ragel-em','Embedded Ragel'),
('ragel-java','Ragel in Java Host'),
('ragel-objc','Ragel in Objective C Host'),
('ragel-ruby','Ragel in Ruby Host'),
('raw','Raw token data'),
('rb','Ruby'),
('rbcon','Ruby irb session'),
('rconsole','RConsole'),
('rd','Rd'),
('rebol','REBOL'),
('redcode','Redcode'),
('registry','reg'),
('rhtml','RHTML'),
('RobotFramework','RobotFramework'),
('rst','reStructuredText'),
('rust','Rust'),
('sass','Sass'),
('scala','Scala'),
('scaml','Scaml'),
('scheme','Scheme'),
('scilab','Scilab'),
('scss','SCSS'),
('shell-session','Shell Session'),
('smali','Smali'),
('smalltalk','Smalltalk'),
('smarty','Smarty'),
('sml','Standard ML'),
('snobol','Snobol'),
('sourceslist','Debian Sourcelist'),
('sp','SourcePawn'),
('spec','RPMSpec'),
('splus','S'),
('sql','SQL'),
('sqlite3','sqlite3con'),
('squidconf','SquidConf'),
('ssp','Scalate Server Page'),
('stan','Stan'),
('systemverilog','systemverilog'),
('tcl','Tcl'),
('tcsh','Tcsh'),
('tea','Tea'),
('tex','TeX'),
('text','Text only'),
('trac-wiki','MoinMoin/Trac Wiki markup'),
('treetop','Treetop'),
('ts','TypeScript'),
('urbiscript','UrbiScript'),
('vala','Vala'),
('vb.net','VB.net'),
('velocity','Velocity'),
('verilog','verilog'),
('vgl','VGL'),
('vhdl','vhdl'),
('vim','VimL'),
('xml','XML'),
('xml+cheetah','XML+Cheetah'),
('xml+django','XML+Django/Jinja'),
('xml+erb','XML+Ruby'),
('xml+evoque','XML+Evoque'),
('xml+lasso','XML+Lasso'),
('xml+mako','XML+Mako'),
('xml+myghty','XML+Myghty'),
('xml+php','XML+PHP'),
('xml+smarty','XML+Smarty'),
('xml+velocity','XML+Velocity'),
('xquery','XQuery'),
('xslt','XSLT'),
('xtend','Xtend'),
('yaml','YAML'),
)
class Schema(colander.Schema):
lang = colander.SchemaNode(
colander.String(),
title='Language',
widget=deform.widget.SelectWidget(values=choices),
default='python'
)
text = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=10000),
widget=deform.widget.TextAreaWidget(rows=20, cols=85),
description='Enter some text')
schema = Schema()
options = """
{success:
function (rText, sText, xhr, form) {
var loc = xhr.getResponseHeader('X-Relocate');
if (loc) {
document.location = loc;
};
}
}
"""
def succeed(text,lang):
id = make_paste(text,lang)
location = '/view/' + id
# To appease jquery 1.6+, we need to return something that smells
# like HTML, or we get a "Node cannot be inserted at the
# specified point in the hierarchy" Javascript error. This wasn't
# required under JQuery 1.4.
#return Response(
# '<div>Please wait</div>',
# headers=[('X-Relocate', location), ('Content-Type','text/html')]
# )
raise exc.HTTPFound(location)
form = deform.Form(schema, buttons=('submit',), use_ajax=True,
ajax_options=options)
return render_form(request,form, success=succeed)
@view_config(route_name='view', renderer='templates/view.jinja2')
def view(request):
id = request.matchdict.get('id', -1)
if os.path.exists('simplewebpaste/pastes/%s' % id):
with open('simplewebpaste/pastes/%s' % id, 'rb') as f:
html = f.read().decode('utf-8')
else:
html = ''
return({'html':html})
import tempfile
import pygments
#Python 2 doesn't like this??
#import pygments.lexers as p_lexers
from pygments.lexers import get_lexer_by_name
import pygments.formatters as p_formatters
import pygments.styles as p_styles
import irc_highlight
import base64
import datetime
import os
import os.path
import sys
STYLE = 'monokai'
def generate_token():
now = datetime.datetime.now()
stamp = now.strftime('%y%m%d.%H%M%S.')
key = base64.b32encode(open('/dev/urandom', 'rb').read(5)).decode('latin-1').lower()
return stamp+key
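# Illustrative note: generate_token() yields strings of the form
# 'YYMMDD.HHMMSS.<8 lowercase base32 chars>', e.g. '240131.142507.k3jf9a2q'
# (hypothetical value), so paste filenames sort chronologically.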
def upload(data, destpath):
with open('simplewebpaste/pastes/%s' % destpath, 'wb') as f:
f.write(data)
def format_text(config, text):
if config['in_lang'] == 'irc':
return irc_highlight.highlight(text)
else:
lexer = get_lexer_by_name(config['in_lang'])
style = p_styles.get_style_by_name(STYLE)
formatter = p_formatters.HtmlFormatter(linenos=config['linenos'], cssclass='pb', style=style)
html = pygments.highlight(text, lexer, formatter)
return html
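# Illustrative usage sketch (hypothetical values), mirroring what make_paste()
# does below:
#
#   html = format_text({'in_lang': 'python', 'linenos': 'table'}, 'print "hi"')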
def make_paste(data,lang):
configuration = {
'in_lang': lang,
'linenos': 'table',
}
result = format_text(configuration, data)
output = render('/templates/render.jinja2',{'conf':configuration, 'text':result})
token = generate_token()
upload(output.encode('utf-8'), token)
upload(data.encode('utf-8'), 'raw/%s' % token)
return(token)
|
|
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(ContextMixin):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial.copy()
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
return self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
return model_forms.modelform_factory(model)
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url % self.object.__dict__
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def form_valid(self, form):
self.object = form.save()
return super(ModelFormMixin, self).form_valid(form)
def get_context_data(self, **kwargs):
context = {}
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
context.update(kwargs)
return super(ModelFormMixin, self).get_context_data(**context)
class ProcessFormView(View):
"""
A mixin that processes a form on POST.
"""
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""
A base view for displaying a form
"""
class FormView(TemplateResponseMixin, BaseFormView):
"""
A view for displaying a form, and rendering a template response.
"""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
Base view for creating a new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
with a response rendered by template.
"""
template_name_suffix = '_form'
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
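# Illustrative usage sketch (hypothetical Author model):
#
#   class AuthorCreate(CreateView):
#       model = Author            # renders <app>/author_form.html via the '_form' suffix
#
#   class AuthorDelete(DeleteView):
#       model = Author            # renders <app>/author_confirm_delete.html
#       success_url = '/authors/'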
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
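# Illustrative example (hypothetical 8-byte buffer, little-endian host):
# with in_buf = '\x01\x02\x03\x04\x05\x06\x07\x08',
#   bufreverse(in_buf)  -> '\x04\x03\x02\x01\x08\x07\x06\x05'  (bytes flipped within each 32-bit word)
#   wordreverse(in_buf) -> '\x05\x06\x07\x08\x01\x02\x03\x04'  (word order reversed)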
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76 bytes of the 80-byte header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
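# Example CONFIG-FILE contents (illustrative values; keys match the defaults
# and checks applied below):
#   host=127.0.0.1
#   port=1337
#   rpcuser=someuser
#   rpcpass=somepass
#   threads=2
#   hashmeter=1
#   scantime=30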
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 1337
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
"""Detaches any ISO file (if any) currently attached to a virtual machine."""
from baseCmd import *
from baseResponse import *
class detachIsoCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""The ID of the virtual machine"""
"""Required"""
self.virtualmachineid = None
self.typeInfo['virtualmachineid'] = 'uuid'
self.required = ["virtualmachineid", ]
class detachIsoResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the ID of the virtual machine"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account associated with the virtual machine"""
self.account = None
self.typeInfo['account'] = 'string'
"""the number of cpu this virtual machine is running with"""
self.cpunumber = None
self.typeInfo['cpunumber'] = 'integer'
"""the speed of each cpu"""
self.cpuspeed = None
self.typeInfo['cpuspeed'] = 'integer'
"""the amount of the vm's CPU currently used"""
self.cpuused = None
self.typeInfo['cpuused'] = 'string'
"""the date when this virtual machine was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""Vm details in key/value pairs."""
self.details = None
self.typeInfo['details'] = 'map'
"""the read (io) of disk on the vm"""
self.diskioread = None
self.typeInfo['diskioread'] = 'long'
"""the write (io) of disk on the vm"""
self.diskiowrite = None
self.typeInfo['diskiowrite'] = 'long'
"""the read (bytes) of disk on the vm"""
self.diskkbsread = None
self.typeInfo['diskkbsread'] = 'long'
"""the write (bytes) of disk on the vm"""
self.diskkbswrite = None
self.typeInfo['diskkbswrite'] = 'long'
"""the ID of the disk offering of the virtual machine"""
self.diskofferingid = None
self.typeInfo['diskofferingid'] = 'string'
"""the name of the disk offering of the virtual machine"""
self.diskofferingname = None
self.typeInfo['diskofferingname'] = 'string'
"""user generated name. The name of the virtual machine is returned if no displayname exists."""
self.displayname = None
self.typeInfo['displayname'] = 'string'
"""an optional field whether to the display the vm to the end user or not."""
self.displayvm = None
self.typeInfo['displayvm'] = 'boolean'
"""the name of the domain in which the virtual machine exists"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the ID of the domain in which the virtual machine exists"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the virtual network for the service offering"""
self.forvirtualnetwork = None
self.typeInfo['forvirtualnetwork'] = 'boolean'
"""the group name of the virtual machine"""
self.group = None
self.typeInfo['group'] = 'string'
"""the group ID of the virtual machine"""
self.groupid = None
self.typeInfo['groupid'] = 'string'
"""Os type ID of the virtual machine"""
self.guestosid = None
self.typeInfo['guestosid'] = 'string'
"""true if high-availability is enabled, false otherwise"""
self.haenable = None
self.typeInfo['haenable'] = 'boolean'
"""the ID of the host for the virtual machine"""
self.hostid = None
self.typeInfo['hostid'] = 'string'
"""the name of the host for the virtual machine"""
self.hostname = None
self.typeInfo['hostname'] = 'string'
"""the hypervisor on which the template runs"""
self.hypervisor = None
self.typeInfo['hypervisor'] = 'string'
"""instance name of the user vm; this parameter is returned to the ROOT admin only"""
self.instancename = None
self.typeInfo['instancename'] = 'string'
"""true if vm contains XS tools inorder to support dynamic scaling of VM cpu/memory."""
self.isdynamicallyscalable = None
self.typeInfo['isdynamicallyscalable'] = 'boolean'
"""an alternate display text of the ISO attached to the virtual machine"""
self.isodisplaytext = None
self.typeInfo['isodisplaytext'] = 'string'
"""the ID of the ISO attached to the virtual machine"""
self.isoid = None
self.typeInfo['isoid'] = 'string'
"""the name of the ISO attached to the virtual machine"""
self.isoname = None
self.typeInfo['isoname'] = 'string'
"""ssh key-pair"""
self.keypair = None
self.typeInfo['keypair'] = 'string'
"""the memory allocated for the virtual machine"""
self.memory = None
self.typeInfo['memory'] = 'integer'
"""the name of the virtual machine"""
self.name = None
self.typeInfo['name'] = 'string'
"""the incoming network traffic on the vm"""
self.networkkbsread = None
self.typeInfo['networkkbsread'] = 'long'
"""the outgoing network traffic on the host"""
self.networkkbswrite = None
self.typeInfo['networkkbswrite'] = 'long'
"""OS type id of the vm"""
self.ostypeid = None
self.typeInfo['ostypeid'] = 'long'
"""the password (if exists) of the virtual machine"""
self.password = None
self.typeInfo['password'] = 'string'
"""true if the password rest feature is enabled, false otherwise"""
self.passwordenabled = None
self.typeInfo['passwordenabled'] = 'boolean'
"""the project name of the vm"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the vm"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicip = None
self.typeInfo['publicip'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicipid = None
self.typeInfo['publicipid'] = 'string'
"""device ID of the root volume"""
self.rootdeviceid = None
self.typeInfo['rootdeviceid'] = 'long'
"""device type of the root volume"""
self.rootdevicetype = None
self.typeInfo['rootdevicetype'] = 'string'
"""the ID of the service offering of the virtual machine"""
self.serviceofferingid = None
self.typeInfo['serviceofferingid'] = 'string'
"""the name of the service offering of the virtual machine"""
self.serviceofferingname = None
self.typeInfo['serviceofferingname'] = 'string'
"""State of the Service from LB rule"""
self.servicestate = None
self.typeInfo['servicestate'] = 'string'
"""the state of the virtual machine"""
self.state = None
self.typeInfo['state'] = 'string'
"""an alternate display text of the template for the virtual machine"""
self.templatedisplaytext = None
self.typeInfo['templatedisplaytext'] = 'string'
"""the ID of the template for the virtual machine. A -1 is returned if the virtual machine was created from an ISO file."""
self.templateid = None
self.typeInfo['templateid'] = 'string'
"""the name of the template for the virtual machine"""
self.templatename = None
self.typeInfo['templatename'] = 'string'
"""the user's ID who deployed the virtual machine"""
self.userid = None
self.typeInfo['userid'] = 'string'
"""the user's name who deployed the virtual machine"""
self.username = None
self.typeInfo['username'] = 'string'
"""the vgpu type used by the virtual machine"""
self.vgpu = None
self.typeInfo['vgpu'] = 'string'
"""the ID of the availablility zone for the virtual machine"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the availability zone for the virtual machine"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""list of affinity groups associated with the virtual machine"""
self.affinitygroup = []
"""the list of nics associated with vm"""
self.nic = []
"""list of security groups associated with the virtual machine"""
self.securitygroup = []
"""the list of resource tags associated with vm"""
self.tags = []
"""the ID of the latest async job acting on this object"""
self.jobid = None
self.typeInfo['jobid'] = ''
"""the current status of the latest async job acting on this object"""
self.jobstatus = None
self.typeInfo['jobstatus'] = ''
class affinitygroup:
def __init__(self):
""""the ID of the affinity group"""
self.id = None
""""the account owning the affinity group"""
self.account = None
""""the description of the affinity group"""
self.description = None
""""the domain name of the affinity group"""
self.domain = None
""""the domain ID of the affinity group"""
self.domainid = None
""""the name of the affinity group"""
self.name = None
""""the project name of the affinity group"""
self.project = None
""""the project ID of the affinity group"""
self.projectid = None
""""the type of the affinity group"""
self.type = None
""""virtual machine IDs associated with this affinity group"""
self.virtualmachineIds = None
class nic:
def __init__(self):
""""the ID of the nic"""
self.id = None
""""the broadcast uri of the nic"""
self.broadcasturi = None
""""device id for the network when plugged into the virtual machine"""
self.deviceid = None
""""the gateway of the nic"""
self.gateway = None
""""the IPv6 address of network"""
self.ip6address = None
""""the cidr of IPv6 network"""
self.ip6cidr = None
""""the gateway of IPv6 network"""
self.ip6gateway = None
""""the ip address of the nic"""
self.ipaddress = None
""""true if nic is default, false otherwise"""
self.isdefault = None
""""the isolation uri of the nic"""
self.isolationuri = None
""""true if nic is default, false otherwise"""
self.macaddress = None
""""the netmask of the nic"""
self.netmask = None
""""the ID of the corresponding network"""
self.networkid = None
""""the name of the corresponding network"""
self.networkname = None
""""the Secondary ipv4 addr of nic"""
self.secondaryip = None
""""the traffic type of the nic"""
self.traffictype = None
""""the type of the nic"""
self.type = None
""""Id of the vm to which the nic belongs"""
self.virtualmachineid = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class egressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class ingressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class securitygroup:
def __init__(self):
""""the ID of the security group"""
self.id = None
""""the account owning the security group"""
self.account = None
""""the description of the security group"""
self.description = None
""""the domain name of the security group"""
self.domain = None
""""the domain ID of the security group"""
self.domainid = None
""""the name of the security group"""
self.name = None
""""the project name of the group"""
self.project = None
""""the project id of the group"""
self.projectid = None
""""the number of virtualmachines associated with this securitygroup"""
self.virtualmachinecount = None
""""the list of virtualmachine ids associated with this securitygroup"""
self.virtualmachineids = None
""""the list of egress rules associated with the security group"""
self.egressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of ingress rules associated with the security group"""
self.ingressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the ID of the latest async job acting on this object"""
self.jobid = None
""""the current status of the latest async job acting on this object"""
self.jobstatus = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
|
|
"""This module allows the user to place text on the screen.
"""
# Author: Prabhu Ramachandran <[email protected]>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
from distutils.version import StrictVersion
# Enthought library imports.
from traits.api import Instance, Range, Str, Bool, Property, \
Float
from traitsui.api import View, Group, Item, InstanceEditor
from tvtk.api import tvtk
from apptools.persistence import state_pickler
# Local imports
from mayavi.core.module import Module
from mayavi.core.pipeline_info import PipelineInfo
VTK_VER = StrictVersion(tvtk.Version().vtk_version)
######################################################################
# `Text` class.
######################################################################
class Text(Module):
# The version of this class. Used for persistence.
__version__ = 0
# The tvtk TextActor.
actor = Instance(tvtk.TextActor, allow_none=False, record=True)
# The property of the axes (color etc.).
property = Property(record=True)
# The text to be displayed. Note that this should really be `Str`
# but wxGTK only returns unicode.
text = Str('Text', desc='the text to be displayed')
# The x-position of this actor.
x_position = Float(0.0, desc='the x-coordinate of the text')
# The y-position of this actor.
y_position = Float(0.0, desc='the y-coordinate of the text')
# The z-position of this actor.
z_position = Float(0.0, desc='the z-coordinate of the text')
# Shadow the positions as ranges for 2D. Simply using a RangeEditor
# does not work as it resets the 3D positions to 1 when the dialog is
# loaded.
_x_position_2d = Range(0., 1., 0., enter_set=True, auto_set=False,
desc='the x-coordinate of the text')
_y_position_2d = Range(0., 1., 0., enter_set=True, auto_set=False,
desc='the y-coordinate of the text')
# 3D position
position_in_3d = Bool(False,
desc='whether the position of the object is given in 2D or in 3D')
# The width of the text.
width = Range(0.0, 1.0, 0.4, enter_set=True, auto_set=False,
desc='the width of the text as a fraction of the viewport')
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
########################################
# The view of this object.
if VTK_VER > '5.1':
_text_actor_group = Group(Item(name='visibility'),
Item(name='text_scale_mode'),
Item(name='alignment_point'),
Item(name='minimum_size'),
Item(name='maximum_line_height'),
show_border=True,
label='Text Actor')
else:
_text_actor_group = Group(Item(name='visibility'),
Item(name='scaled_text'),
Item(name='alignment_point'),
Item(name='minimum_size'),
Item(name='maximum_line_height'),
show_border=True,
label='Text Actor')
_position_group_2d = Group(Item(name='_x_position_2d',
label='X position'),
Item(name='_y_position_2d',
label='Y position'),
visible_when='not position_in_3d')
_position_group_3d = Group(Item(name='x_position', label='X',
springy=True),
Item(name='y_position', label='Y',
springy=True),
Item(name='z_position', label='Z',
springy=True),
show_border=True,
label='Position',
orientation='horizontal',
visible_when='position_in_3d')
view = View(Group(Group(Item(name='text'),
Item(name='position_in_3d'),
_position_group_2d,
_position_group_3d,
Item(name='width',
enabled_when='object.actor.scaled_text'),
),
Group(Item(name='actor', style='custom',
editor=\
InstanceEditor(view=View(_text_actor_group))
),
show_labels=False),
label='TextActor',
show_labels=False
),
Group(Item(name='_property', style='custom', resizable=True),
label='TextProperty',
show_labels=False),
)
########################################
# Private traits.
_updating = Bool(False)
_property = Instance(tvtk.TextProperty)
######################################################################
# `object` interface
######################################################################
def __set_pure_state__(self, state):
self._updating = True
state_pickler.set_state(self, state, first=['actor'],
ignore=['_updating'])
self._updating = False
######################################################################
# `Module` interface
######################################################################
def setup_pipeline(self):
"""Override this method so that it *creates* the tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be set up. So upstream data
will not be available. The idea is that you simply create the
basic objects and set up those parts of the pipeline not
dependent on upstream sources and filters. You should also
set the `actors` attribute up at this point.
"""
actor = self.actor = tvtk.TextActor(input=str(self.text))
if VTK_VER > '5.1':
actor.set(text_scale_mode='prop', width=0.4, height=1.0)
else:
actor.set(scaled_text=True, width=0.4, height=1.0)
c = actor.position_coordinate
c.set(coordinate_system='normalized_viewport',
value=(self.x_position, self.y_position, 0.0))
c = actor.position2_coordinate
c.set(coordinate_system='normalized_viewport')
self._property.opacity = 1.0
self._text_changed(self.text)
self._width_changed(self.width)
self._shadow_positions(True)
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when any of the inputs
sends a `pipeline_changed` event.
"""
self.pipeline_changed = True
def update_data(self):
"""Override this method so that it flushes the vtk pipeline if
that is necessary.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
# Just set data_changed, the component should do the rest.
self.data_changed = True
######################################################################
# Non-public interface
######################################################################
def _text_changed(self, value):
actor = self.actor
if actor is None:
return
if self._updating:
return
actor.input = str(value)
self.render()
def _shadow_positions(self, value):
self.sync_trait('x_position', self, '_x_position_2d',
remove=(not value))
self.sync_trait('y_position', self, '_y_position_2d',
remove=(not value))
if not value:
self._x_position_2d = self.x_position
self._y_position_2d = self.y_position
def _position_in_3d_changed(self, value):
if value:
self.actor.position_coordinate.coordinate_system='world'
self.actor.position2_coordinate.coordinate_system='world'
else:
self.actor.position2_coordinate.coordinate_system=\
'normalized_viewport'
self.actor.position_coordinate.coordinate_system=\
'normalized_viewport'
x = self.x_position
y = self.y_position
if x < 0:
x = 0
elif x > 1:
x = 1
if y < 0:
y = 0
elif y > 1:
y = 1
self.set(x_position=x, y_position=y,
trait_change_notify=False)
self._shadow_positions(not value)
self._change_position()
self.actor._width_changed(self.width, self.width)
self.pipeline_changed = True
def _change_position(self):
""" Callback for _x_position, _y_position, and z_position.
"""
actor = self.actor
if actor is None:
return
if self._updating:
return
x = self.x_position
y = self.y_position
z = self.z_position
if self.position_in_3d:
actor.position_coordinate.value = x, y, z
else:
actor.position = x, y
self.render()
_x_position_changed = _change_position
_y_position_changed = _change_position
_z_position_changed = _change_position
def _width_changed(self, value):
actor = self.actor
if actor is None:
return
if self._updating:
return
actor.width = value
self.render()
def _update_traits(self):
self._updating = True
try:
actor = self.actor
self.text = actor.input
pos = actor.position
self.x_position, self.y_position = pos
self.width = actor.width
finally:
self._updating = False
def _get_property(self):
return self._property
def _actor_changed(self, old, new):
if old is not None:
for obj in (old, self._property):
obj.on_trait_change(self.render, remove=True)
old.on_trait_change(self._update_traits, remove=True)
self._property = new.text_property
for obj in (new, self._property):
obj.on_trait_change(self.render)
new.on_trait_change(self._update_traits)
self.actors = [new]
self.render()
def _foreground_changed_for_scene(self, old, new):
# Change the default color for the actor.
self.property.color = new
self.render()
def _scene_changed(self, old, new):
super(Text, self)._scene_changed(old, new)
self._foreground_changed_for_scene(None, new.foreground)
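# Illustrative usage sketch (assumes an interactive mayavi session; the mlab
# calls below are standard entry points, shown only as an example of adding
# this module to a scene):
#   from mayavi import mlab
#   mlab.figure()
#   txt = Text(text='Hello', x_position=0.05, y_position=0.9)
#   mlab.get_engine().add_module(txt)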
|
|
# -*- coding: utf-8 -*-
"""
Limiters for high order DG methods
"""
import numpy as nm
from sfepy.discrete.dg.poly_spaces import iter_by_order
from sfepy.discrete.dg.fields import get_raveler, get_unraveler
from sfepy.base.base import output
MACHINE_EPS = 1e-30
def minmod(a, b, c):
"""Minmod function of three variables, returns:
0 , where the signs of a, b, c differ
sign(a) * min(|a|, |b|, |c|) , elsewhere (the argument of smallest magnitude)
Parameters
----------
a : array_like
b : array_like
c : array_like
Returns
-------
out : ndarray
"""
seq = (nm.sign(a) == nm.sign(b)) & (nm.sign(b) == nm.sign(c))
res = nm.zeros(nm.shape(a))
res[seq] = nm.sign(a[seq]) * nm.minimum.reduce([nm.abs(b[seq]),
nm.abs(a[seq]),
nm.abs(c[seq])])
return res
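# Example (illustrative): for a = [0.5], b = [1.0], c = [0.2], all of the same
# sign, minmod returns [0.2]; if the signs differ, e.g. b = [-1.0], it returns [0.0].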
def minmod_seq(abc):
"""Minmod function of n variables, returns:
0 , where the signs of a_1, a_2, ..., a_n differ
sign(a_1) * min(|a_1|, ..., |a_n|) , elsewhere (the entry of smallest magnitude)
Parameters
----------
abc : sequence of array_like
Returns
-------
out : ndarray
"""
seq = nm.hstack([nm.sign(x) for x in abc])
seq = seq[:, 0, None] == seq
seq = seq.prod(axis=1).astype(bool)
res = nm.zeros(nm.shape(abc[0]))
res[seq] = nm.sign(abc[0][seq]) * \
nm.minimum.reduce([nm.abs(x[seq]) for x in abc])
return res
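# Example (illustrative): for column-vector inputs, minmod_seq([a, b, c])
# agrees with minmod(a, b, c); with more arguments it returns the entry of
# smallest magnitude where all signs agree, and zero elsewhere.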
class DGLimiter:
name = "abstract DG limiter"
def __init__(self, field, verbose=False):
self.field = field
self.extended = field.extended
self.n_el_nod = field.n_el_nod
self.n_cell = field.n_cell
self.ravel = get_raveler(self.n_el_nod, self.n_cell)
self.unravel = get_unraveler(self.n_el_nod, self.n_cell)
self.verbose = verbose
output("Setting up limiter: {} for {}.".format(self.name,
self.field.family_name))
def __call__(self, u):
raise NotImplementedError("Called abstract limiter")
class IdentityLimiter(DGLimiter):
"""Neutral limiter returning unchanged solution."""
name = "identity"
def __call__(self, u):
if self.verbose: output(self.name + " limiter")
return u
class MomentLimiter1D(DGLimiter):
""" Moment limiter for 1D based on [1]_
.. [1] Krivodonova (2007):
Limiters for high-order discontinuous Galerkin methods"""
name = "moment_1D_limiter"
def __call__(self, u):
""""
Parameters
----------
u : array_like
raveled solution at time step n in shape (order * n_space_nod, ...)
Returns
-------
u : ndarray
unraveled limited solution
"""
# for convenience do not try to limit FV
if self.n_el_nod == 1:
if self.verbose: output(self.name + " no limiting for FV.")
return u
u = self.unravel(u).swapaxes(0, 1)
idx = nm.arange(nm.shape(u[0, 1:-1])[0])
idx_bc = nm.arange(nm.shape(u[0, :])[0])
nu = nm.copy(u)
tilu = nm.zeros(u.shape[1:])
for ll in range(self.n_el_nod - 1, 0, -1):
tilu[idx] = minmod(nu[ll, 1:-1][idx],
nu[ll - 1, 2:][idx] - nu[ll - 1, 1:-1][idx],
nu[ll - 1, 1:-1][idx] - nu[ll - 1, :-2][idx])
idx = idx[nm.where(abs(tilu[idx] - nu[ll, 1:-1][idx])
> MACHINE_EPS)[0]]
if self.verbose:
output("{} limiting in {} cells out of {} :".
format(self.name, len(idx), self.n_cell))
output(idx)
if len(idx) == 0:
break
nu[ll, 1:-1][idx] = tilu[idx]
return self.ravel(nu.swapaxes(0, 1))[:, 0]
class MomentLimiter2D(DGLimiter):
""" Moment limiter for 2D based on [1]_
.. [1] Krivodonova (2007):
Limiters for high-order discontinuous Galerkin methods"""
name = "moment_limiter_2D"
def __call__(self, u):
"""
Parameters
----------
u : array_like
raveled solution at time step n in shape (order * n_space_nod, ...)
Returns
-------
u : ndarray
unraveled limited solution
"""
if self.n_el_nod == 1:
if self.verbose: output(self.name + " no limiting for FV.")
return u
ex = self.extended
nbrhd_idx = self.field.get_facet_neighbor_idx()
inner_mask = nbrhd_idx[:, :, 0] > 0
idx = nm.where(inner_mask.prod(axis=1))[0]
nbrhd_idx = nbrhd_idx[:, :, 0]
u = self.unravel(u).swapaxes(0, 1)
nu = nm.zeros((self.field.approx_order + 1,) * 2 + u.shape[1:])
tilu = nm.zeros(u.shape[1:])
for ll, (ii, jj) in enumerate(iter_by_order(self.field.approx_order, 2,
extended=ex)):
nu[ii, jj, ...] = u[ll]
for ii, jj in reversed(list(iter_by_order(self.field.approx_order, 2,
extended=ex))):
minmod_args = [nu[ii, jj, idx]]
nbrhs = nbrhd_idx[idx]
if ii - 1 >= 0:
alf = nm.sqrt((2 * ii - 1) / (2 * ii + 1))
# right difference in x axis
dx_r = alf * (nu[ii - 1, jj, nbrhs[:, 1]] - nu[ii - 1, jj, idx])
# left difference in x axis
dx_l = alf * (nu[ii - 1, jj, idx] - nu[ii - 1, jj, nbrhs[:, 3]])
minmod_args += [dx_r, dx_l]
if jj - 1 >= 0:
alf = nm.sqrt((2 * jj - 1) / (2 * jj + 1))
# right i.e. element "up" difference in y axis
dy_up = alf * (nu[ii, jj - 1, nbrhs[:, 2]] - nu[ii, jj - 1, idx])
# left i.e. element "down" difference in y axis
dy_dn = alf * (nu[ii, jj - 1, idx] - nu[ii, jj - 1, nbrhs[:, 0]])
minmod_args += [dy_up, dy_dn]
tilu[idx] = minmod_seq(minmod_args)
idx = idx[nm.where(abs(tilu[idx] - nu[ii, jj, idx]) > MACHINE_EPS)[0]]
if self.verbose:
output("{} limiting {} in {} cells out of {} :".
format(self.name, (ii, jj), len(idx), self.n_cell))
output(idx)
if len(idx) == 0:
break
nu[ii, jj, idx] = tilu[idx]
resu = nm.zeros(u.shape)
for ll, (ii, jj) in enumerate(iter_by_order(self.field.approx_order, 2,
extended=ex)):
resu[ll] = nu[ii, jj]
return self.ravel(resu.swapaxes(0, 1))[:, 0]
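# Illustrative usage sketch (the `field` argument comes from a sfepy DG
# discretization; names here only show how a limiter is wired into a
# time-stepping loop):
#   limiter = MomentLimiter1D(field, verbose=True)
#   u = limiter(u)  # u is the raveled DOF vector at the current time step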
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machines_operations import build_capture_request_initial, build_create_or_update_request_initial, build_deallocate_request_initial, build_delete_request_initial, build_generalize_request, build_get_extensions_request, build_get_request, build_list_all_request, build_list_available_sizes_request, build_list_request, build_power_off_request_initial, build_redeploy_request_initial, build_restart_request_initial, build_start_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
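# Illustrative usage sketch (not part of the generated code): this operations
# class is normally reached through the versioned async management client, e.g.
#   client = ComputeManagementClient(credential, subscription_id)
#   vm = await client.virtual_machines.get(resource_group_name, vm_name)
# where `credential` is an async azure-identity credential; the exact wiring
# depends on the caller's package setup.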
class VirtualMachinesOperations:
"""VirtualMachinesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2016_03_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_extensions(
self,
resource_group_name: str,
vm_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.VirtualMachineExtensionsListResult":
"""The operation to get all extensions of a Virtual Machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine containing the extension.
:type vm_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtensionsListResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineExtensionsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineExtensionsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_extensions_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get_extensions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineExtensionsListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_extensions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions'} # type: ignore
async def _capture_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineCaptureParameters",
**kwargs: Any
) -> Optional["_models.VirtualMachineCaptureResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineCaptureResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')
request = build_capture_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._capture_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore
@distributed_trace_async
async def begin_capture(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineCaptureParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineCaptureResult"]:
"""Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used
to create similar VMs.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Capture Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineCaptureParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineCaptureResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._capture_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachine",
**kwargs: Any
) -> "_models.VirtualMachine":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachine')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachine",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachine"]:
"""The operation to create or update a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Create Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachine
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.VirtualMachine]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""The operation to delete a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
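# Example (illustrative): begin_* methods return an AsyncLROPoller, so callers
# typically await the poller and then its result, e.g.
#   poller = await client.virtual_machines.begin_delete(resource_group_name, vm_name)
#   result = await poller.result()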
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_name: str,
expand: Optional[str] = "instanceView",
**kwargs: Any
) -> "_models.VirtualMachine":
"""Retrieves information about the model view or the instance view of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param expand: The expand expression to apply on the operation. The default value is
"instanceView".
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachine, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachine
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
async def _deallocate_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_deallocate_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._deallocate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
@distributed_trace_async
async def begin_deallocate(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Shuts down the virtual machine and releases the compute resources. You are not billed for the
compute resources that this virtual machine uses.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._deallocate_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
@distributed_trace_async
async def generalize(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> "_models.OperationStatusResponse":
"""Sets the state of the virtual machine to generalized.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationStatusResponse, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_generalize_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.generalize.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
generalize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Lists all of the virtual machines in the specified resource group. Use the nextLink property in
the response to get the next page of virtual machines.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
@distributed_trace
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Lists all of the virtual machines in the specified subscription. Use the nextLink property in
the response to get the next page of virtual machines.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
template_url=self.list_all.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
@distributed_trace
def list_available_sizes(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
"""Lists all available virtual machine sizes to which the specified virtual machine can be
resized.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.list_available_sizes.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'} # type: ignore
async def _power_off_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_power_off_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._power_off_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
@distributed_trace_async
async def begin_power_off(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""The operation to power off (stop) a virtual machine. The virtual machine can be restarted with
the same provisioned resources. You are still charged for this virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._power_off_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
async def _restart_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_restart_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._restart_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore
@distributed_trace_async
async def begin_restart(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""The operation to restart a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restart_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_start_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._start_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore
@distributed_trace_async
async def begin_start(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""The operation to start a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore
async def _redeploy_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_redeploy_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._redeploy_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_redeploy_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore
@distributed_trace_async
async def begin_redeploy(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Shuts down the virtual machine, moves it to a new node, and powers it back on.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._redeploy_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore
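# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated operations class). It
# assumes the azure-identity package and hypothetical subscription/resource
# names; adjust before uncommenting.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.compute.aio import ComputeManagementClient
#
#     async def main():
#         async with DefaultAzureCredential() as credential:
#             async with ComputeManagementClient(credential, "<subscription-id>") as client:
#                 # Long-running operation: for this API version the poller
#                 # resolves to an OperationStatusResponse.
#                 poller = await client.virtual_machines.begin_deallocate(
#                     "<resource-group>", "<vm-name>")
#                 await poller.result()
#                 # Paged listing: AsyncItemPaged is consumed with "async for".
#                 async for vm in client.virtual_machines.list("<resource-group>"):
#                     print(vm.name)
#
#     asyncio.run(main())
# ---------------------------------------------------------------------------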
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
# libvirt_ovs_bridge is used if Quantum provides Nova
# the 'vif_type' port binding field
cfg.StrOpt('libvirt_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.BoolOpt('libvirt_use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
class LibvirtBaseVIFDriver(object):
def get_vif_devname(self, mapping):
if 'vif_devname' in mapping:
return mapping['vif_devname']
return ("nic" + mapping['vif_uuid'])[:network_model.NIC_NAME_LEN]
def get_config(self, instance, network, mapping):
conf = vconfig.LibvirtConfigGuestInterface()
model = None
driver = None
if (CONF.libvirt_type in ('kvm', 'qemu') and
CONF.libvirt_use_virtio_for_bridges):
model = "virtio"
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if CONF.libvirt_type == "qemu":
driver = "qemu"
designer.set_vif_guest_frontend_config(
conf, mapping['mac'], model, driver)
return conf
def plug(self, instance, vif):
pass
def unplug(self, instance, vif):
pass
class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
"""Generic VIF driver for libvirt networking."""
def get_bridge_name(self, network):
return network['bridge']
def get_config_bridge(self, instance, network, mapping):
"""Get VIF configurations for bridge type."""
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
mapping)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(network),
self.get_vif_devname(mapping))
mac_id = mapping['mac'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
primary_addr = mapping['ips'][0]['ip']
dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None
if mapping['dhcp_server']:
dhcp_server = mapping['dhcp_server']
if CONF.use_ipv6:
ra_server = mapping.get('gateway_v6') + "/128"
if CONF.allow_same_net_traffic:
ipv4_cidr = network['cidr']
if CONF.use_ipv6:
ipv6_cidr = network['cidr_v6']
designer.set_vif_host_backend_filter_config(
conf, name, primary_addr, dhcp_server,
ra_server, ipv4_cidr, ipv6_cidr)
return conf
def get_config(self, instance, network, mapping):
vif_type = mapping.get('vif_type')
LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
"network=%(network)s mapping=%(mapping)s")
% locals())
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
if vif_type == network_model.VIF_TYPE_BRIDGE:
return self.get_config_bridge(instance, network, mapping)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
super(LibvirtGenericVIFDriver,
self).plug(instance, vif)
network, mapping = vif
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
if mapping.get('should_create_vlan'):
iface = CONF.vlan_interface or network['bridge_interface']
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
{'vlan': network['vlan'],
'bridge': self.get_bridge_name(network)},
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
self.get_bridge_name(network),
iface)
else:
iface = CONF.flat_interface or network['bridge_interface']
LOG.debug(_("Ensuring bridge %s"),
self.get_bridge_name(network), instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
self.get_bridge_name(network),
iface)
def plug(self, instance, vif):
network, mapping = vif
vif_type = mapping.get('vif_type')
LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
"network=%(network)s mapping=%(mapping)s")
% locals())
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
if vif_type == network_model.VIF_TYPE_BRIDGE:
self.plug_bridge(instance, vif)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
def unplug_bridge(self, instance, vif):
"""No manual unplugging required."""
super(LibvirtGenericVIFDriver,
self).unplug(instance, vif)
def unplug(self, instance, vif):
network, mapping = vif
vif_type = mapping.get('vif_type')
LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
"network=%(network)s mapping=%(mapping)s")
% locals())
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
if vif_type == network_model.VIF_TYPE_BRIDGE:
self.unplug_bridge(instance, vif)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
class LibvirtBridgeDriver(LibvirtGenericVIFDriver):
"""Deprecated in favour of LibvirtGenericVIFDriver.
Retained in Grizzly for compatibility with Quantum
drivers which do not yet report 'vif_type' port binding.
To be removed in Hxxxx."""
def __init__(self):
LOG.deprecated(
_("LibvirtBridgeDriver is deprecated and "
"will be removed in the Hxxxx release. Please "
"update the 'libvirt_vif_driver' config parameter "
"to use the LibvirtGenericVIFDriver class instead"))
def get_config(self, instance, network, mapping):
return self.get_config_bridge(instance, network, mapping)
def plug(self, instance, vif):
self.plug_bridge(instance, vif)
def unplug(self, instance, vif):
self.unplug_bridge(instance, vif)
class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
"""VIF driver for Open vSwitch that uses libivrt type='ethernet'
Used for libvirt versions that do not support
OVS virtual port XML (0.9.10 or earlier).
"""
def get_bridge_name(self, network):
return network.get('bridge') or CONF.libvirt_ovs_bridge
def get_ovs_interfaceid(self, mapping):
return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
def get_config(self, instance, network, mapping):
dev = self.get_vif_devname(mapping)
conf = super(LibvirtOpenVswitchDriver,
self).get_config(instance,
network,
mapping)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def plug(self, instance, vif):
network, mapping = vif
iface_id = self.get_ovs_interfaceid(mapping)
dev = self.get_vif_devname(mapping)
linux_net.create_tap_dev(dev)
linux_net.create_ovs_vif_port(self.get_bridge_name(network),
dev, iface_id, mapping['mac'],
instance['uuid'])
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
network, mapping = vif
linux_net.delete_ovs_vif_port(self.get_bridge_name(network),
self.get_vif_devname(mapping))
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver):
"""VIF driver that uses OVS + Linux Bridge for iptables compatibility.
Enables the use of OVS-based Quantum plugins while at the same
time using iptables-based filtering, which requires that vifs be
plugged into a linux bridge, not OVS. Iptables filtering is useful in
particular for Nova security groups.
"""
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_bridge_name(self, network):
return network.get('bridge') or CONF.libvirt_ovs_bridge
def get_ovs_interfaceid(self, mapping):
return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
def get_config(self, instance, network, mapping):
br_name = self.get_br_name(mapping['vif_uuid'])
network['bridge'] = br_name
return super(LibvirtHybridOVSBridgeDriver,
self).get_config(instance,
network,
mapping)
def plug(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms
"""
network, mapping = vif
iface_id = self.get_ovs_interfaceid(mapping)
br_name = self.get_br_name(mapping['vif_uuid'])
v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
linux_net.create_ovs_vif_port(self.get_bridge_name(network),
v2_name, iface_id, mapping['mac'],
instance['uuid'])
def unplug(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
network, mapping = vif
br_name = self.get_br_name(mapping['vif_uuid'])
v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid'])
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ovs_vif_port(self.get_bridge_name(network),
v2_name)
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
"""VIF driver for Open vSwitch that uses integrated libvirt
OVS virtual port XML (introduced in libvirt 0.9.11)."""
def get_bridge_name(self, network):
return network.get('bridge') or CONF.libvirt_ovs_bridge
def get_ovs_interfaceid(self, mapping):
return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
def get_config(self, instance, network, mapping):
"""Pass data required to create OVS virtual port element."""
conf = super(LibvirtOpenVswitchVirtualPortDriver,
self).get_config(instance,
network,
mapping)
designer.set_vif_host_backend_ovs_config(
conf, self.get_bridge_name(network),
self.get_ovs_interfaceid(mapping),
self.get_vif_devname(mapping))
return conf
def plug(self, instance, vif):
pass
def unplug(self, instance, vif):
"""No action needed. Libvirt takes care of cleanup."""
pass
class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
def get_bridge_name(self, network):
def_bridge = ("brq" + network['id'])[:network_model.NIC_NAME_LEN]
return network.get('bridge') or def_bridge
def get_config(self, instance, network, mapping):
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
self.get_bridge_name(network),
None,
filtering=False)
conf = super(QuantumLinuxBridgeVIFDriver,
self).get_config(instance,
network,
mapping)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(network),
self.get_vif_devname(mapping))
return conf
def plug(self, instance, vif):
pass
def unplug(self, instance, vif):
"""No action needed. Libvirt takes care of cleanup."""
pass
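# Illustrative sketch only (not part of this module): how a bridge-type VIF is
# configured and plugged with the generic driver. The instance/network/mapping
# values below are hypothetical; vifs are passed as (network, mapping) tuples.
#
#     # nova.conf: libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
#     driver = LibvirtGenericVIFDriver()
#     instance = {'name': 'instance-00000001', 'uuid': 'hypothetical-uuid'}
#     network = {'bridge': 'br100', 'bridge_interface': 'eth0',
#                'multi_host': False, 'cidr': '10.0.0.0/24'}
#     mapping = {'vif_type': network_model.VIF_TYPE_BRIDGE,
#                'vif_uuid': 'hypothetical-vif-uuid',
#                'mac': 'fa:16:3e:00:00:01',
#                'ips': [{'ip': '10.0.0.5'}], 'dhcp_server': '10.0.0.1'}
#     conf = driver.get_config(instance, network, mapping)  # guest <interface> XML
#     driver.plug(instance, (network, mapping))
#     driver.unplug(instance, (network, mapping))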
|
|
from __future__ import print_function
import sys
import logging
from . import _stringio
from .types import TOKEN_TYPES, TokenType
logger = logging.getLogger(__name__)
class TokenOffsets(dict):
def __init__(self, parent, default_type=None):
self.logger = logger.getChild(self.__class__.__name__)
if default_type is not None:
assert isinstance(default_type, TokenType), 'default_type must be ' \
'a TokenType'
if parent is None:
parent = DefaultTokenOffset(self)
assert isinstance(parent, TokenOffset), 'parent must be a ' \
'TokenOffset instance, was %r instead' % parent
self.default_type = default_type
self.parent = parent
def __repr__(self):
return '<%s\n %s\n>' % (
self.__class__.__name__,
'\n '.join(sorted(repr(v) for v in set(self.values()))),
)
def get_key(self, key):
if isinstance(key, str):
# Only a token string given, no type; fall back to the default token type
assert self.default_type, 'Token type must be given'
key = self.default_type, key
elif hasattr(key, 'tok_type') and hasattr(key, 'token'):
# Something that looks like a Token was given, extract the
# parameters
key = key.tok_type, key.token
elif isinstance(key, TokenType):
# A TokenType was given; this means we have no token, so we have to
# assume there isn't one
key = TOKEN_TYPES[key], None
# To make lookups work in any case, convert to actual token types
if not isinstance(key[0], TokenType):
key = TOKEN_TYPES[key[0]], key[1]
return key
def __getitem__(self, key):
return self.get(key)
def get(self, key, recurse=False):
# Unpack the type and the token
type_, token = key = self.get_key(key)
if key in self:
# Return if it completely exists
value = dict.__getitem__(self, key)
elif recurse and not isinstance(self.parent, DefaultTokenOffset):
value = self.parent.parent[key]
else:
# Create one, it doesn't exist apparently
if (type_, None) in self:
value = dict.__getitem__(self, (type_, None))
value = value.copy(type_, token)
else:
value = TokenOffset(self, type_, token)
self[key] = value
return value
def update(self, other):
for k, v in other.items():
if k not in self:
self[k].update(v)
def __setitem__(self, key, value):
if isinstance(key, str):
assert self.default_type, 'Token type must be given'
key = self.default_type, key
dict.__setitem__(self, key, value)
class TokenOffset(object):
def __init__(self, parent, type_, token, pre=0, post=0, children=None,
end=None, pre_collapse=True, post_collapse=True):
self.logger = logger.getChild(self.__class__.__name__)
self.token = token
self.pre = pre
self.post = post
self.type = type_
self.parent = parent
self.end = end
self.pre_collapse = pre_collapse
self.post_collapse = post_collapse
if children is None:
children = TokenOffsets(
self,
default_type=TOKEN_TYPES[type_],
)
self.children = children
def __gt__(self, other):
return (self.pre, self.post) > (other.pre, other.post)
def update(self, other):
self.pre = other.pre
self.post = other.post
self.end = other.end
self.pre_collapse = other.pre_collapse
self.post_collapse = other.post_collapse
self.children.update(other.children)
def copy(self, type_=None, token=None):
return TokenOffset(
parent=self.parent,
type_=type_ or self.type,
token=token or self.token,
pre=self.pre,
post=self.post,
pre_collapse=self.pre_collapse,
post_collapse=self.post_collapse,
)
def _get_parent(self):
return self._parent
def _set_parent(self, parent):
assert isinstance(parent, TokenOffsets), 'parent must be a ' \
'TokenOffsets instance, was %s instead' % type(parent)
self._parent = parent
def _get_type(self):
return self._type
def _set_type(self, type_):
assert type_ in TOKEN_TYPES, 'Expected %r to be in %r' % (
type_,
self,
)
self._type = TOKEN_TYPES[type_]
def _get_surround(self):
return self.pre, self.post
def _set_surround(self, surround):
if isinstance(surround, int):
pre = post = surround
else:
pre, post = surround
self.pre = pre
self.post = post
def _get_end(self):
return self._end
def _set_end(self, end):
if end is not None:
if not isinstance(end, list):
end = [end]
end = list(map(self.parent.get_key, end))
self._end = end
parent = property(_get_parent, _set_parent, doc='''The parent.
:class:`~formatter.offsets.TokenOffsets`''')
type = property(_get_type, _set_type, doc='''The type.
:class:`~formatter.types.TokenType`''')
surround = property(_get_surround, _set_surround, doc='''Surround the token
with this amount of space.
Setting will set the `pre` and `post` when given a tuple or :func:`int`.
''')
end = property(_get_end, _set_end, doc='''Set the end token in case of
children.
Should either be a token or a tuple with
:class:`~formatter.types.TokenType` and `token` which will be a string.''')
def __str__(self):
return str(getattr(self, 'type', None))
def __repr__(self):
return '<%s[%s:%s]%r (%d,%d) %r>' % (
self.__class__.__name__,
hex(id(self)),
self,
self.token,
self.pre,
self.post,
self.end,
)
class DefaultTokenOffset(TokenOffset):
def __init__(self, parent=None):
if parent is None:
parent = TokenOffsets(self)
TokenOffset.__init__(
self,
parent=parent,
type_=TOKEN_TYPES.DEFAULT,
token=None,
)
@classmethod
def _pprint(cls, stream, offset, visited, depth=0):
stream.write(' ' * depth)
if offset.children and offset.end:
stream.write('[ ')
offset.end, end = None, offset.end
print(repr(offset), file=stream)
offset.end = end
if offset in visited and offset.end:
stream.write(' ' * (depth + 4))
print('RECURSION', file=stream)
else:
visited[offset] = True
for child in sorted(offset.children.values()):
cls._pprint(stream, child, visited, depth=depth + 4)
if offset.end:
stream.write(' ' * depth)
stream.write('] ')
print(repr(offset.end), file=stream)
def pprint(self, stream=sys.stderr):
return self._pprint(stream, self, visited={}, depth=1)
def get_token_offsets():
token_offset = DefaultTokenOffset()
token_offsets = token_offset.children
keywords = DefaultTokenOffset().children
keywords.default_type = TOKEN_TYPES.NAME
keywords['with'].post = 1
keywords['assert'].post = 1
keywords['except'].post = 1
keywords['import'].post = 1
keywords['for'].post = 1
# keywords['while'].post = 1
# keywords['del'].post = 1
keywords['if'].post = 1
keywords['if'].pre_collapse = False
# keywords['if'].post_collapse = False
keywords['elif'].post = 1
keywords['else'].pre_collapse = False
keywords['else'].post_collapse = False
keywords['return'].post = 1
keywords['yield'].post = 1
keywords['raise'].post = 1
keywords['lambda'].post = 1
keywords['as'].surround = 1
keywords['in'].surround = 1
keywords['or'].surround = 1
keywords['and'].surround = 1
keywords['not'].post = 1
token_offsets.update(keywords)
# Operators
operators = DefaultTokenOffset().children
operators.default_type = TOKEN_TYPES.OP
operators['!='].surround = 1
operators['%'].surround = 1
operators['%='].surround = 1
operators['&'].surround = 1
operators['&='].surround = 1
operators['*'].surround = 1
operators['**'].surround = 1
operators['**='].surround = 1
operators['*='].surround = 1
operators['+'].surround = 1
operators['+='].surround = 1
operators['-'].surround = 1
operators['-='].surround = 1
operators['/'].surround = 1
operators['//'].surround = 1
operators['//='].surround = 1
operators['/='].surround = 1
operators['<'].surround = 1
operators['<<'].surround = 1
operators['<<='].surround = 1
operators['<='].surround = 1
operators['=='].surround = 1
operators['>'].surround = 1
operators['>='].surround = 1
operators['>>'].surround = 1
operators['>>='].surround = 1
operators['^'].surround = 1
operators['^='].surround = 1
operators['|'].surround = 1
operators['|='].surround = 1
operators['='].surround = 1
operators[TOKEN_TYPES.NAME, 'is'].surround = 1
token_offsets.update(operators)
token_offsets.default_type = TOKEN_TYPES.OP
token_offsets[':'].post = 1
token_offsets[','].post = 1
token_offsets['='].post = 1
# Within parameters we don't want extra space around the =
paren = token_offsets[TOKEN_TYPES.OP, '(']
paren.end = TOKEN_TYPES.OP, ')'
paren.children.default_type = TOKEN_TYPES.OP
paren.children['-'].surround = 0
paren.children['='].surround = 0
paren.children['*'].surround = 0
paren.children['**'].surround = 0
paren.children[','].post = 1
paren.children.default_type = TOKEN_TYPES.NAME
paren.children[TOKEN_TYPES.NAME].surround = 0
paren.children['or'].surround = 1
paren.children['and'].surround = 1
paren.children['for'].surround = 1
paren.children['if'].surround = 1
paren.children['yield'].surround = 0
paren.children.update(keywords)
paren.children.update(operators)
# Within dicts we don't want extra space around the :
brace = token_offsets[TOKEN_TYPES.OP, '{']
brace.end = TOKEN_TYPES.OP, '}'
brace.children.default_type = TOKEN_TYPES.OP
brace.children[':'].post = 1
brace.children[','].post = 1
brace.children[TOKEN_TYPES.NAME].surround = 0
# Within slices we don't want extra space around the :
bracket = token_offsets[TOKEN_TYPES.OP, '[']
bracket.end = TOKEN_TYPES.OP, ']'
bracket.children.default_type = TOKEN_TYPES.OP
bracket.children[':'].surround = 0
bracket.children[','].post = 1
bracket.children.default_type = TOKEN_TYPES.NAME
bracket.children['for'].surround = 1
bracket.children['if'].surround = 1
bracket.children.update(keywords)
# A little recursion to handle cases with braces inside parentheses and
# vice versa
brace.children.default_type = TOKEN_TYPES.OP
paren.children.default_type = TOKEN_TYPES.OP
bracket.children.default_type = TOKEN_TYPES.OP
brace.children['{'] = brace
brace.children['('] = paren
brace.children['['] = bracket
paren.children['{'] = brace
paren.children['('] = paren
paren.children['['] = bracket
bracket.children['{'] = brace
bracket.children['('] = paren
bracket.children['['] = bracket
# Classes need a space after class and no space before (
class_ = token_offsets[TOKEN_TYPES.NAME, 'class']
class_.post = 1
class_.end = TOKEN_TYPES.OP, ':'
class_.children.default_type = TOKEN_TYPES.OP
class_.children['{'] = brace
class_.children['('] = paren
class_.children['['] = bracket
class_.children[TOKEN_TYPES.NAME].post = 0
# Defs need a space after def and no space before (
def_ = token_offsets[TOKEN_TYPES.NAME, 'def']
def_.post = 1
def_.end = TOKEN_TYPES.OP, ':'
def_.children.default_type = TOKEN_TYPES.OP
def_.children['{'] = brace
def_.children['('] = paren
def_.children['['] = bracket
def_.children[TOKEN_TYPES.NAME].post = 0
# Make sure a from ... import ... style import has the space it needs
from_ = token_offsets[TOKEN_TYPES.NAME, 'from']
from_.post = 1
from_.end = TOKEN_TYPES.NAME, 'import'
from_.children[TOKEN_TYPES.NAME, 'import'].surround = 1
# Make sure print statements are formatted correctly, even when they have a >>
print_ = token_offsets[TOKEN_TYPES.NAME, 'print']
print_.post = 1
print_.end = [
(TOKEN_TYPES.OP, ','),
(TOKEN_TYPES.NEWLINE, None),
]
print_.children.default_type = TOKEN_TYPES.OP
print_.children['>>'].surround = 0
print_.children['%'].surround = 1
print_.children[','].post = 1
stream = _stringio.StringIO()
token_offset.pprint(stream)
logger.debug('Token offsets:\n%s', stream.getvalue())
return token_offsets
TOKEN_OFFSETS = get_token_offsets()
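# Illustrative lookups (assumptions, not an exhaustive API description): offsets
# are indexed by (TokenType, token) pairs, or by bare token strings once a
# default_type is set, and expose pre/post spacing plus nested children for
# bracketed contexts.
#
#     comma = TOKEN_OFFSETS[TOKEN_TYPES.OP, ',']
#     assert comma.surround == (0, 1)                # space after ',', none before
#     paren = TOKEN_OFFSETS[TOKEN_TYPES.OP, '(']
#     assert paren.children['='].surround == (0, 0)  # no space around '=' in call args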
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates java source files from a mojom.Module."""
import argparse
import os
import re
from jinja2 import contextfilter
import mojom.generate.generator as generator
import mojom.generate.module as mojom
from mojom.generate.template_expander import UseJinja
GENERATOR_PREFIX = 'java'
_HEADER_SIZE = 8
_spec_to_java_type = {
'b': 'boolean',
'd': 'double',
'f': 'float',
'h:d:c': 'org.chromium.mojo.system.DataPipe.ConsumerHandle',
'h:d:p': 'org.chromium.mojo.system.DataPipe.ProducerHandle',
'h:m': 'org.chromium.mojo.system.MessagePipeHandle',
'h': 'org.chromium.mojo.system.UntypedHandle',
'h:s': 'org.chromium.mojo.system.SharedBufferHandle',
'i16': 'short',
'i32': 'int',
'i64': 'long',
'i8': 'byte',
's': 'String',
'u16': 'short',
'u32': 'int',
'u64': 'long',
'u8': 'byte',
}
_spec_to_decode_method = {
'b': 'readBoolean',
'd': 'readDouble',
'f': 'readFloat',
'h:d:c': 'readConsumerHandle',
'h:d:p': 'readProducerHandle',
'h:m': 'readMessagePipeHandle',
'h': 'readUntypedHandle',
'h:s': 'readSharedBufferHandle',
'i16': 'readShort',
'i32': 'readInt',
'i64': 'readLong',
'i8': 'readByte',
's': 'readString',
'u16': 'readShort',
'u32': 'readInt',
'u64': 'readLong',
'u8': 'readByte',
}
_java_primitive_to_boxed_type = {
'boolean': 'Boolean',
'byte': 'Byte',
'double': 'Double',
'float': 'Float',
'int': 'Integer',
'long': 'Long',
'short': 'Short',
}
def NameToComponent(name):
# insert '_' between anything and a Title name (e.g., HTTPEntry2FooBar ->
# HTTP_Entry2_FooBar)
name = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
# insert '_' between a non-uppercase character and the start of an uppercase
# block (e.g., HTTP_Entry2_FooBar -> HTTP_Entry2_Foo_Bar)
name = re.sub('([^A-Z_])([A-Z])', r'\1_\2', name)
return [x.lower() for x in name.split('_')]
def CapitalizeFirst(string):
return string[0].upper() + string[1:]
def UpperCamelCase(name):
return ''.join([CapitalizeFirst(x) for x in NameToComponent(name)])
def CamelCase(name):
uccc = UpperCamelCase(name)
return uccc[0].lower() + uccc[1:]
def ConstantStyle(name):
components = NameToComponent(name)
if components[0] == 'k':
components = components[1:]
return '_'.join([x.upper() for x in components])
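# Illustrative examples of the naming helpers above (hypothetical inputs):
#     NameToComponent('HTTPEntry2FooBar')    -> ['http', 'entry2', 'foo', 'bar']
#     UpperCamelCase('http_entry2_foo_bar')  -> 'HttpEntry2FooBar'
#     CamelCase('some_method_name')          -> 'someMethodName'
#     ConstantStyle('kSomeConstant')         -> 'SOME_CONSTANT'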
def GetNameForElement(element):
if isinstance(element, (mojom.Enum,
mojom.Interface,
mojom.Struct)):
return UpperCamelCase(element.name)
if isinstance(element, mojom.InterfaceRequest):
return GetNameForElement(element.kind)
if isinstance(element, (mojom.Method,
mojom.Parameter,
mojom.Field)):
return CamelCase(element.name)
if isinstance(element, mojom.EnumValue):
return (UpperCamelCase(element.enum_name) + '.' +
ConstantStyle(element.name))
if isinstance(element, (mojom.NamedValue,
mojom.Constant)):
return ConstantStyle(element.name)
raise Exception("Unexpected element: " % element)
def GetInterfaceResponseName(method):
return UpperCamelCase(method.name + 'Response')
def ParseStringAttribute(attribute):
assert isinstance(attribute, basestring)
return attribute
def IsArray(kind):
return isinstance(kind, (mojom.Array, mojom.FixedArray))
@contextfilter
def DecodeMethod(context, kind, offset, bit):
def _DecodeMethodName(kind):
if IsArray(kind):
return _DecodeMethodName(kind.kind) + 's'
if isinstance(kind, mojom.Enum):
return _DecodeMethodName(mojom.INT32)
if isinstance(kind, mojom.InterfaceRequest):
return "readInterfaceRequest"
if isinstance(kind, mojom.Interface):
return "readServiceInterface"
return _spec_to_decode_method[kind.spec]
methodName = _DecodeMethodName(kind)
additionalParams = ''
if (kind == mojom.BOOL):
additionalParams = ', %d' % bit
if isinstance(kind, mojom.Interface):
additionalParams = ', %s.BUILDER' % GetJavaType(context, kind)
if IsArray(kind) and isinstance(kind.kind, mojom.Interface):
additionalParams = ', %s.BUILDER' % GetJavaType(context, kind.kind)
return '%s(%s%s)' % (methodName, offset, additionalParams)
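# For example (illustrative only): a plain 'i32' field at byte offset 8 renders
# as 'readInt(8)', a packed boolean at offset 10, bit 3 as 'readBoolean(10, 3)',
# and interface-typed fields additionally pass their BUILDER as a parameter.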
@contextfilter
def EncodeMethod(context, kind, variable, offset, bit):
additionalParams = ''
if (kind == mojom.BOOL):
additionalParams = ', %d' % bit
if isinstance(kind, mojom.Interface):
additionalParams = ', %s.BUILDER' % GetJavaType(context, kind)
if IsArray(kind) and isinstance(kind.kind, mojom.Interface):
additionalParams = ', %s.BUILDER' % GetJavaType(context, kind.kind)
return 'encode(%s, %s%s)' % (variable, offset, additionalParams)
def GetPackage(module):
if 'JavaPackage' in module.attributes:
return ParseStringAttribute(module.attributes['JavaPackage'])
# Default package.
return "org.chromium.mojom." + module.namespace
def GetNameForKind(context, kind):
def _GetNameHierachy(kind):
hierachy = []
if kind.parent_kind:
hierachy = _GetNameHierachy(kind.parent_kind)
hierachy.append(GetNameForElement(kind))
return hierachy
module = context.resolve('module')
elements = []
if GetPackage(module) != GetPackage(kind.module):
elements += [GetPackage(kind.module)]
elements += _GetNameHierachy(kind)
return '.'.join(elements)
def GetBoxedJavaType(context, kind):
unboxed_type = GetJavaType(context, kind, False)
if unboxed_type in _java_primitive_to_boxed_type:
return _java_primitive_to_boxed_type[unboxed_type]
return unboxed_type
@contextfilter
def GetJavaType(context, kind, boxed=False):
if boxed:
return GetBoxedJavaType(context, kind)
if isinstance(kind, (mojom.Struct, mojom.Interface)):
return GetNameForKind(context, kind)
if isinstance(kind, mojom.InterfaceRequest):
return ("org.chromium.mojo.bindings.InterfaceRequest<%s>" %
GetNameForKind(context, kind.kind))
if IsArray(kind):
return "%s[]" % GetJavaType(context, kind.kind)
if isinstance(kind, mojom.Enum):
return "int"
return _spec_to_java_type[kind.spec]
def IsHandle(kind):
return kind.spec[0] == 'h'
@contextfilter
def DefaultValue(context, field):
assert field.default
if isinstance(field.kind, mojom.Struct):
assert field.default == "default"
return "new %s()" % GetJavaType(context, field.kind)
return "(%s) %s" % (GetJavaType(context, field.kind),
ExpressionToText(context, field.default))
@contextfilter
def NewArray(context, kind, size):
if IsArray(kind.kind):
return NewArray(context, kind.kind, size) + '[]'
return 'new %s[%s]' % (GetJavaType(context, kind.kind), size)
@contextfilter
def ExpressionToText(context, token):
def _TranslateNamedValue(named_value):
entity_name = GetNameForElement(named_value)
if named_value.parent_kind:
return GetJavaType(context, named_value.parent_kind) + '.' + entity_name
# Handle the case where named_value is a module level constant:
if not isinstance(named_value, mojom.EnumValue):
entity_name = (GetConstantsMainEntityName(named_value.module) + '.' +
entity_name)
if GetPackage(named_value.module) == GetPackage(context.resolve('module')):
return entity_name
return GetPackage(named_value.module) + '.' + entity_name
if isinstance(token, mojom.NamedValue):
return _TranslateNamedValue(token)
# Add Long suffix to all number literals.
if re.match('^[0-9]+$', token):
return token + 'L'
return token
def IsPointerArrayKind(kind):
if not IsArray(kind):
return False
sub_kind = kind.kind
return generator.IsObjectKind(sub_kind)
def GetConstantsMainEntityName(module):
if 'JavaConstantsClassName' in module.attributes:
return ParseStringAttribute(module.attributes['JavaConstantsClassName'])
  # This constructs the name of the embedding class for module level constants
  # by extracting the mojom's filename and appending 'Constants' to it.
return (UpperCamelCase(module.path.split('/')[-1].rsplit('.', 1)[0]) +
'Constants')
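# Illustrative example (not part of the generator): for a module parsed from a
# hypothetical file "sample_service.mojom" with no JavaConstantsClassName
# attribute, module level constants would be emitted into a class named
#
#   GetConstantsMainEntityName(module)  # -> "SampleServiceConstants"
#
# assuming UpperCamelCase("sample_service") yields "SampleService".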
class Generator(generator.Generator):
java_filters = {
"interface_response_name": GetInterfaceResponseName,
"default_value": DefaultValue,
"decode_method": DecodeMethod,
"expression_to_text": ExpressionToText,
"encode_method": EncodeMethod,
"is_handle": IsHandle,
"is_pointer_array_kind": IsPointerArrayKind,
"is_struct_kind": lambda kind: isinstance(kind, mojom.Struct),
"java_type": GetJavaType,
"name": GetNameForElement,
"new_array": NewArray,
"struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
}
def GetJinjaExports(self):
return {
"module": self.module,
"package": GetPackage(self.module),
}
@UseJinja("java_templates/enum.java.tmpl", filters=java_filters)
def GenerateEnumSource(self, enum):
exports = self.GetJinjaExports()
exports.update({"enum": enum})
return exports
@UseJinja("java_templates/struct.java.tmpl", filters=java_filters)
def GenerateStructSource(self, struct):
exports = self.GetJinjaExports()
exports.update({"struct": struct})
return exports
@UseJinja("java_templates/interface.java.tmpl", filters=java_filters)
def GenerateInterfaceSource(self, interface):
exports = self.GetJinjaExports()
exports.update({"interface": interface})
if interface.client:
for client in self.module.interfaces:
if client.name == interface.client:
exports.update({"client": client})
return exports
@UseJinja("java_templates/constants.java.tmpl", filters=java_filters)
def GenerateConstantsSource(self, module):
exports = self.GetJinjaExports()
exports.update({"main_entity": GetConstantsMainEntityName(module),
"constants": module.constants})
return exports
def GenerateFiles(self, unparsed_args):
parser = argparse.ArgumentParser()
parser.add_argument("--java_output_directory", dest="java_output_directory")
args = parser.parse_args(unparsed_args)
if self.output_dir and args.java_output_directory:
self.output_dir = os.path.join(args.java_output_directory,
GetPackage(self.module).replace('.', '/'))
if not os.path.exists(self.output_dir):
try:
os.makedirs(self.output_dir)
        except OSError:
          # Ignore errors on directory creation.
          pass
for enum in self.module.enums:
self.Write(self.GenerateEnumSource(enum),
"%s.java" % GetNameForElement(enum))
for struct in self.module.structs:
self.Write(self.GenerateStructSource(struct),
"%s.java" % GetNameForElement(struct))
for interface in self.module.interfaces:
self.Write(self.GenerateInterfaceSource(interface),
"%s.java" % GetNameForElement(interface))
if self.module.constants:
self.Write(self.GenerateConstantsSource(self.module),
"%s.java" % GetConstantsMainEntityName(self.module))
def GetJinjaParameters(self):
return {
'lstrip_blocks': True,
'trim_blocks': True,
}
def GetGlobals(self):
return {
'module': self.module,
}
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test cases for all neutron tests.
"""
import abc
import contextlib
import functools
import inspect
import os
import os.path
import eventlet.timeout
import fixtures
import mock
from neutron_lib.callbacks import manager as registry_manager
from neutron_lib import fixture
from oslo_concurrency.fixture import lockutils
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import strutils
from oslotest import base
import six
import testtools
from neutron._i18n import _
from neutron.agent.linux import external_process
from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg
from neutron.api.rpc.callbacks.producer import registry as rpc_producer_reg
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron.conf.agent import common as agent_config
from neutron.db import _model_query as model_query
from neutron.db import _resource_extend as resource_extend
from neutron.db import agentschedulers_db
from neutron.db import api as db_api
from neutron import manager
from neutron import policy
from neutron.quota import resource_registry
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
from neutron.tests import tools
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.conf.common')
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
SUDO_CMD = 'sudo -n'
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
return True
def bool_from_env(key, strict=False, default=False):
value = os.environ.get(key)
return strutils.bool_from_string(value, strict=strict, default=default)
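# Illustrative usage (OS_FAIL_ON_MISSING is a hypothetical variable name):
#
#   os.environ['OS_FAIL_ON_MISSING'] = 'True'
#   bool_from_env('OS_FAIL_ON_MISSING')               # -> True
#   bool_from_env('OS_UNSET_VARIABLE', default=True)  # -> True (falls back)
#
# With strict=True, strutils.bool_from_string is expected to raise ValueError
# for strings it does not recognize instead of returning the default.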
def setup_test_logging(config_opts, log_dir, log_file_path_template):
# Have each test log into its own log file
config_opts.set_override('debug', True)
fileutils.ensure_tree(log_dir, mode=0o755)
log_file = sanitize_log_path(
os.path.join(log_dir, log_file_path_template))
config_opts.set_override('log_file', log_file)
config.setup_logging()
def sanitize_log_path(path):
# Sanitize the string so that its log path is shell friendly
replace_map = {' ': '-', '(': '_', ')': '_'}
for s, r in replace_map.items():
path = path.replace(s, r)
return path
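# Illustrative example (the test id below is made up):
#
#   sanitize_log_path('dsvm-functional/test_foo (id=1)/test.log')
#   # -> 'dsvm-functional/test_foo-_id=1_/test.log'
#
# Only spaces and parentheses are rewritten; every other character is kept.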
def unstable_test(reason):
def decor(f):
@functools.wraps(f)
def inner(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as e:
msg = ("%s was marked as unstable because of %s, "
"failure was: %s") % (self.id(), reason, e)
raise self.skipTest(msg)
return inner
return decor
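# Illustrative sketch of how a test would opt in (the test name and bug
# reference are hypothetical):
#
#   class MyTestCase(base.BaseTestCase):
#       @unstable_test("bug 1234567")
#       def test_sometimes_fails(self):
#           ...
#
# Any exception raised by the decorated test is converted into a skip that
# records both the stated reason and the original failure.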
class AttributeDict(dict):
"""
Provide attribute access (dict.key) to dictionary values.
"""
def __getattr__(self, name):
"""Allow attribute access for all keys in the dict."""
if name in self:
return self[name]
raise AttributeError(_("Unknown attribute '%s'.") % name)
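# Illustrative usage (values are made up):
#
#   port = AttributeDict({'id': 'port-1', 'admin_state_up': True})
#   port.id                 # -> 'port-1'
#   port.missing_attribute  # raises AttributeError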
def _catch_timeout(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except eventlet.Timeout as e:
self.fail('Execution of this test timed out: %s' % e)
return func
class _CatchTimeoutMetaclass(abc.ABCMeta):
def __init__(cls, name, bases, dct):
super(_CatchTimeoutMetaclass, cls).__init__(name, bases, dct)
for name, method in inspect.getmembers(
# NOTE(ihrachys): we should use isroutine because it will catch
# both unbound methods (python2) and functions (python3)
cls, predicate=inspect.isroutine):
if name.startswith('test_'):
setattr(cls, name, _catch_timeout(method))
# Test worker cannot survive eventlet's Timeout exception, which effectively
# kills the whole worker, with all test cases scheduled to it. This metaclass
# makes all test cases convert Timeout exceptions into unittest friendly
# failure mode (self.fail).
@six.add_metaclass(_CatchTimeoutMetaclass)
class DietTestCase(base.BaseTestCase):
"""Same great taste, less filling.
BaseTestCase is responsible for doing lots of plugin-centric setup
that not all tests require (or can tolerate). This class provides
only functionality that is common across all tests.
"""
def setUp(self):
super(DietTestCase, self).setUp()
# FIXME(amuller): this must be called in the Neutron unit tests base
# class. Moving this may cause non-deterministic failures. Bug #1489098
# for more info.
db_options.set_defaults(cfg.CONF, connection='sqlite://')
# Configure this first to ensure pm debugging support for setUp()
debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
if debugger:
self.addOnException(post_mortem_debug.get_exception_handler(
debugger))
# Make sure we see all relevant deprecation warnings when running tests
self.useFixture(tools.WarningsFixture())
# NOTE(ihrachys): oslotest already sets stopall for cleanup, but it
# does it using six.moves.mock (the library was moved into
# unittest.mock in Python 3.4). So until we switch to six.moves.mock
# everywhere in unit tests, we can't remove this setup. The base class
        # is used in 3rd party projects, so we would need to switch all of them to
# six before removing the cleanup callback from here.
self.addCleanup(mock.patch.stopall)
self.addCleanup(self.reset_model_query_hooks)
self.addCleanup(self.reset_resource_extend_functions)
self.addOnException(self.check_for_systemexit)
self.orig_pid = os.getpid()
tools.reset_random_seed()
@staticmethod
def reset_model_query_hooks():
model_query._model_query_hooks = {}
@staticmethod
def reset_resource_extend_functions():
resource_extend._resource_extend_functions = {}
def addOnException(self, handler):
def safe_handler(*args, **kwargs):
try:
return handler(*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
self.addDetail('failure in exception handler %s' % handler,
testtools.content.TracebackContent(
(ctx.type_, ctx.value, ctx.tb), self))
return super(DietTestCase, self).addOnException(safe_handler)
def check_for_systemexit(self, exc_info):
if isinstance(exc_info[1], SystemExit):
if os.getpid() != self.orig_pid:
# Subprocess - let it just exit
raise
# This makes sys.exit(0) still a failure
self.force_failure = True
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
def assertOrderedEqual(self, expected, actual):
expect_val = self.sort_dict_lists(expected)
actual_val = self.sort_dict_lists(actual)
self.assertEqual(expect_val, actual_val)
def sort_dict_lists(self, dic):
for key, value in dic.items():
if isinstance(value, list):
dic[key] = sorted(value)
elif isinstance(value, dict):
dic[key] = self.sort_dict_lists(value)
return dic
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
class ProcessMonitorFixture(fixtures.Fixture):
"""Test fixture to capture and cleanup any spawn process monitor."""
def _setUp(self):
self.old_callable = (
external_process.ProcessMonitor._spawn_checking_thread)
p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
"_spawn_checking_thread",
new=lambda x: self.record_calls(x))
p.start()
self.instances = []
self.addCleanup(self.stop)
def stop(self):
for instance in self.instances:
instance.stop()
def record_calls(self, instance):
self.old_callable(instance)
self.instances.append(instance)
class BaseTestCase(DietTestCase):
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', etcdir('neutron.conf')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(lockutils.ExternalLockFixture())
self.useFixture(fixture.APIDefinitionFixture())
cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
self.addCleanup(CONF.reset)
self.useFixture(ProcessMonitorFixture())
self.useFixture(fixtures.MonkeyPatch(
'neutron_lib.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
self.useFixture(fixtures.MonkeyPatch(
'oslo_config.cfg.find_config_files',
lambda project=None, prog=None, extension=None: []))
self.setup_rpc_mocks()
self.setup_config()
self._callback_manager = registry_manager.CallbacksManager()
self.useFixture(fixture.CallbackRegistryFixture(
callback_manager=self._callback_manager))
# Give a private copy of the directory to each test.
self.useFixture(fixture.PluginDirectoryFixture())
policy.init()
self.addCleanup(policy.reset)
self.addCleanup(resource_registry.unregister_all_resources)
self.addCleanup(db_api.sqla_remove_all)
self.addCleanup(rpc_consumer_reg.clear)
self.addCleanup(rpc_producer_reg.clear)
def get_new_temp_dir(self):
"""Create a new temporary directory.
:returns: fixtures.TempDir
"""
return self.useFixture(fixtures.TempDir())
def get_default_temp_dir(self):
"""Create a default temporary directory.
Returns the same directory during the whole test case.
:returns: fixtures.TempDir
"""
if not hasattr(self, '_temp_dir'):
self._temp_dir = self.get_new_temp_dir()
return self._temp_dir
def get_temp_file_path(self, filename, root=None):
"""Returns an absolute path for a temporary file.
        If root is None, the file is created in the default temporary directory.
        The directory is also created if it does not exist yet.
        If root is not None, the file is created inside the directory passed as
        the root= argument.
:param filename: filename
:type filename: string
:param root: temporary directory to create a new file in
:type root: fixtures.TempDir
:returns: absolute file path string
"""
root = root or self.get_default_temp_dir()
return root.join(filename)
def setup_rpc_mocks(self):
# don't actually start RPC listeners when testing
mock.patch(
'neutron.common.rpc.Connection.consume_in_threads',
return_value=[]).start()
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
# NOTE(russellb) We want all calls to return immediately.
self.messaging_conf.response_timeout = 0
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
def setup_config(self, args=None):
"""Tests that need a non-default config can override this method."""
self.config_parse(args=args)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.items():
CONF.set_override(k, v, group)
def setup_coreplugin(self, core_plugin=None, load_plugins=True):
cp = PluginFixture(core_plugin)
self.useFixture(cp)
self.patched_dhcp_periodic = cp.patched_dhcp_periodic
self.patched_default_svc_plugins = cp.patched_default_svc_plugins
if load_plugins:
manager.init()
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
def setup_rootwrap(self):
agent_config.register_root_helper(cfg.CONF)
self.config(group='AGENT',
root_helper=os.environ.get('OS_ROOTWRAP_CMD', SUDO_CMD))
self.config(group='AGENT',
root_helper_daemon=os.environ.get(
'OS_ROOTWRAP_DAEMON_CMD'))
class PluginFixture(fixtures.Fixture):
def __init__(self, core_plugin=None):
super(PluginFixture, self).__init__()
self.core_plugin = core_plugin
def _setUp(self):
# Do not load default service plugins in the testing framework
# as all the mocking involved can cause havoc.
self.default_svc_plugins_p = mock.patch(
'neutron.manager.NeutronManager._get_default_service_plugins')
self.patched_default_svc_plugins = self.default_svc_plugins_p.start()
self.dhcp_periodic_p = mock.patch(
'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
'add_periodic_dhcp_agent_status_check')
self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
self.agent_health_check_p = mock.patch(
'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
'add_agent_status_check_worker')
self.agent_health_check = self.agent_health_check_p.start()
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if self.core_plugin is not None:
cfg.CONF.set_override('core_plugin', self.core_plugin)
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
nm.clear_instance()
class Timeout(fixtures.Fixture):
"""Setup per test timeouts.
In order to avoid test deadlocks we support setting up a test
timeout parameter read from the environment. In almost all
cases where the timeout is reached this means a deadlock.
A scaling factor allows extremely long tests to specify they
need more time.
"""
def __init__(self, timeout=None, scaling=1):
super(Timeout, self).__init__()
if timeout is None:
timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
self.test_timeout = int(timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
self.test_timeout = 0
if scaling >= 1:
self.test_timeout *= scaling
else:
raise ValueError('scaling value must be >= 1')
def setUp(self):
super(Timeout, self).setUp()
if self.test_timeout > 0:
self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True))
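# Illustrative usage in a test case that needs extra time (the scaling factor
# is arbitrary):
#
#   def setUp(self):
#       super(MySlowTestCase, self).setUp()
#       self.useFixture(Timeout(scaling=2))
#
# When OS_TEST_TIMEOUT is unset or not an integer the fixture is a no-op.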
|
|
from __future__ import unicode_literals
from datetime import datetime
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib.admin import helpers
from django.contrib.admin.utils import (
NestedObjects, display_for_field, display_for_value, flatten,
flatten_fieldsets, label_for_field, lookup_field, quote,
)
from django.db import DEFAULT_DB_ALIAS, models
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from .models import (
Article, Car, Count, Event, EventGuide, Location, Site, Vehicle,
)
class NestedObjectsTests(TestCase):
"""
Tests for ``NestedObject`` utility collection.
"""
def setUp(self):
self.n = NestedObjects(using=DEFAULT_DB_ALIAS)
self.objs = [Count.objects.create(num=i) for i in range(5)]
def _check(self, target):
self.assertEqual(self.n.nested(lambda obj: obj.num), target)
def _connect(self, i, j):
self.objs[i].parent = self.objs[j]
self.objs[i].save()
def _collect(self, *indices):
self.n.collect([self.objs[i] for i in indices])
def test_unrelated_roots(self):
self._connect(2, 1)
self._collect(0)
self._collect(1)
self._check([0, 1, [2]])
def test_siblings(self):
self._connect(1, 0)
self._connect(2, 0)
self._collect(0)
self._check([0, [1, 2]])
def test_non_added_parent(self):
self._connect(0, 1)
self._collect(0)
self._check([0])
def test_cyclic(self):
self._connect(0, 2)
self._connect(1, 0)
self._connect(2, 1)
self._collect(0)
self._check([0, [1, [2]]])
def test_queries(self):
self._connect(1, 0)
self._connect(2, 0)
# 1 query to fetch all children of 0 (1 and 2)
# 1 query to fetch all children of 1 and 2 (none)
# Should not require additional queries to populate the nested graph.
self.assertNumQueries(2, self._collect, 0)
def test_on_delete_do_nothing(self):
"""
Check that the nested collector doesn't query for DO_NOTHING objects.
"""
n = NestedObjects(using=DEFAULT_DB_ALIAS)
objs = [Event.objects.create()]
EventGuide.objects.create(event=objs[0])
with self.assertNumQueries(2):
# One for Location, one for Guest, and no query for EventGuide
n.collect(objs)
def test_relation_on_abstract(self):
"""
#21846 -- Check that `NestedObjects.collect()` doesn't trip
(AttributeError) on the special notation for relations on abstract
models (related_name that contains %(app_label)s and/or %(class)s).
"""
n = NestedObjects(using=DEFAULT_DB_ALIAS)
Car.objects.create()
n.collect([Vehicle.objects.first()])
class UtilsTests(SimpleTestCase):
empty_value = '-empty-'
def test_values_from_lookup_field(self):
"""
Regression test for #12654: lookup_field
"""
SITE_NAME = 'example.com'
TITLE_TEXT = 'Some title'
CREATED_DATE = datetime.min
ADMIN_METHOD = 'admin method'
SIMPLE_FUNCTION = 'function'
INSTANCE_ATTRIBUTE = 'attr'
class MockModelAdmin(object):
def get_admin_value(self, obj):
return ADMIN_METHOD
def simple_function(obj):
return SIMPLE_FUNCTION
site_obj = Site(domain=SITE_NAME)
article = Article(
site=site_obj,
title=TITLE_TEXT,
created=CREATED_DATE,
)
article.non_field = INSTANCE_ATTRIBUTE
verifications = (
('site', SITE_NAME),
('created', localize(CREATED_DATE)),
('title', TITLE_TEXT),
('get_admin_value', ADMIN_METHOD),
(simple_function, SIMPLE_FUNCTION),
('test_from_model', article.test_from_model()),
('non_field', INSTANCE_ATTRIBUTE)
)
mock_admin = MockModelAdmin()
for name, value in verifications:
field, attr, resolved_value = lookup_field(name, article, mock_admin)
if field is not None:
resolved_value = display_for_field(resolved_value, field, self.empty_value)
self.assertEqual(value, resolved_value)
def test_null_display_for_field(self):
"""
Regression test for #12550: display_for_field should handle None
value.
"""
display_value = display_for_field(None, models.CharField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
display_value = display_for_field(None, models.CharField(
choices=(
(None, "test_none"),
)
), self.empty_value)
self.assertEqual(display_value, "test_none")
display_value = display_for_field(None, models.DateField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
display_value = display_for_field(None, models.TimeField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
# Regression test for #13071: NullBooleanField has special
# handling.
display_value = display_for_field(None, models.NullBooleanField(), self.empty_value)
expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None" />' % settings.STATIC_URL
self.assertHTMLEqual(display_value, expected)
display_value = display_for_field(None, models.DecimalField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
display_value = display_for_field(None, models.FloatField(), self.empty_value)
self.assertEqual(display_value, self.empty_value)
def test_number_formats_display_for_field(self):
display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
self.assertEqual(display_value, '12345.6789')
display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
self.assertEqual(display_value, '12345.6789')
display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
self.assertEqual(display_value, '12345')
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_number_formats_with_thousand_separator_display_for_field(self):
display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
self.assertEqual(display_value, '12,345.6789')
display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
self.assertEqual(display_value, '12,345.6789')
display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
self.assertEqual(display_value, '12,345')
def test_list_display_for_value(self):
display_value = display_for_value([1, 2, 3], self.empty_value)
self.assertEqual(display_value, '1, 2, 3')
display_value = display_for_value([1, 2, 'buckle', 'my', 'shoe'], self.empty_value)
self.assertEqual(display_value, '1, 2, buckle, my, shoe')
def test_label_for_field(self):
"""
Tests for label_for_field
"""
self.assertEqual(
label_for_field("title", Article),
"title"
)
self.assertEqual(
label_for_field("hist", Article),
"History"
)
self.assertEqual(
label_for_field("hist", Article, return_attr=True),
("History", None)
)
self.assertEqual(
label_for_field("__unicode__", Article),
"article"
)
self.assertEqual(
label_for_field("__str__", Article),
str("article")
)
with self.assertRaises(AttributeError):
label_for_field("unknown", Article)
def test_callable(obj):
return "nothing"
self.assertEqual(
label_for_field(test_callable, Article),
"Test callable"
)
self.assertEqual(
label_for_field(test_callable, Article, return_attr=True),
("Test callable", test_callable)
)
self.assertEqual(
label_for_field("test_from_model", Article),
"Test from model"
)
self.assertEqual(
label_for_field("test_from_model", Article, return_attr=True),
("Test from model", Article.test_from_model)
)
self.assertEqual(
label_for_field("test_from_model_with_override", Article),
"not What you Expect"
)
self.assertEqual(
label_for_field(lambda x: "nothing", Article),
"--"
)
self.assertEqual(label_for_field('site_id', Article), 'Site id')
class MockModelAdmin(object):
def test_from_model(self, obj):
return "nothing"
test_from_model.short_description = "not Really the Model"
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
"not Really the Model"
)
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin, return_attr=True),
("not Really the Model", MockModelAdmin.test_from_model)
)
def test_label_for_property(self):
# NOTE: cannot use @property decorator, because of
# AttributeError: 'property' object has no attribute 'short_description'
class MockModelAdmin(object):
def my_property(self):
return "this if from property"
my_property.short_description = 'property short description'
test_from_property = property(my_property)
self.assertEqual(
label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
'property short description'
)
def test_related_name(self):
"""
Regression test for #13963
"""
self.assertEqual(
label_for_field('location', Event, return_attr=True),
('location', None),
)
self.assertEqual(
label_for_field('event', Location, return_attr=True),
('awesome event', None),
)
self.assertEqual(
label_for_field('guest', Event, return_attr=True),
('awesome guest', None),
)
def test_safestring_in_field_label(self):
# safestring should not be escaped
class MyForm(forms.Form):
text = forms.CharField(label=mark_safe('<i>text</i>'))
cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline"><i>text</i>:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>')
        # normal strings need to be escaped
class MyForm(forms.Form):
text = forms.CharField(label='&text')
cb = forms.BooleanField(label='&cb')
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline">&text:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">&cb</label>')
def test_flatten(self):
flat_all = ['url', 'title', 'content', 'sites']
inputs = (
((), []),
(('url', 'title', ('content', 'sites')), flat_all),
(('url', 'title', 'content', 'sites'), flat_all),
((('url', 'title'), ('content', 'sites')), flat_all)
)
for orig, expected in inputs:
self.assertEqual(flatten(orig), expected)
def test_flatten_fieldsets(self):
"""
Regression test for #18051
"""
fieldsets = (
(None, {
'fields': ('url', 'title', ('content', 'sites'))
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
fieldsets = (
(None, {
'fields': ('url', 'title', ['content', 'sites'])
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
def test_quote(self):
self.assertEqual(quote('something\nor\nother'), 'something_0Aor_0Aother')
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import six
from heat.common import exception
from heat.common.i18n import _
class CircularDependencyException(exception.HeatException):
msg_fmt = _("Circular Dependency Found: %(cycle)s")
@six.python_2_unicode_compatible
class Node(object):
'''A node in a dependency graph.'''
def __init__(self, requires=None, required_by=None):
'''
Initialise the node, optionally with a set of keys this node
requires and/or a set of keys that this node is required by.
'''
self.require = requires and requires.copy() or set()
self.satisfy = required_by and required_by.copy() or set()
def copy(self):
'''Return a copy of the node.'''
return Node(self.require, self.satisfy)
def reverse_copy(self):
'''Return a copy of the node with the edge directions reversed.'''
return Node(self.satisfy, self.require)
def required_by(self, source=None):
'''
List the keys that require this node, and optionally add a
new one.
'''
if source is not None:
self.satisfy.add(source)
return iter(self.satisfy)
def requires(self, target=None):
'''
        List the keys that this node requires, and optionally add a
        new one.
'''
if target is not None:
self.require.add(target)
return iter(self.require)
def __isub__(self, target):
'''Remove a key that this node requires.'''
self.require.remove(target)
return self
def __nonzero__(self):
'''Return True if this node is not a leaf (it requires other nodes).'''
return bool(self.require)
def __bool__(self):
'''Return True if this node is not a leaf (it requires other nodes).'''
return self.__nonzero__()
def stem(self):
'''Return True if this node is a stem (required by nothing).'''
return not bool(self.satisfy)
def disjoint(self):
'''Return True if this node is both a leaf and a stem.'''
return (not self) and self.stem()
def __len__(self):
'''Count the number of keys required by this node.'''
return len(self.require)
def __iter__(self):
'''Iterate over the keys required by this node.'''
return iter(self.require)
def __str__(self):
'''Return a human-readable string representation of the node.'''
text = '{%s}' % ', '.join(str(n) for n in self)
return six.text_type(text)
def __repr__(self):
'''Return a string representation of the node.'''
return repr(self.require)
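# Illustrative example of a Node's two edge sets (the keys are arbitrary
# strings, not taken from a real template):
#
#   n = Node(requires={'db'}, required_by={'server'})
#   list(n.requires())     # -> ['db']
#   list(n.required_by())  # -> ['server']
#   bool(n)                # True: the node still requires something
#   n -= 'db'
#   bool(n)                # False: now a leaf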
@six.python_2_unicode_compatible
class Graph(collections.defaultdict):
'''A mutable mapping of objects to nodes in a dependency graph.'''
def __init__(self, *args):
super(Graph, self).__init__(Node, *args)
def map(self, func):
'''
Return a dictionary derived from mapping the supplied function onto
each node in the graph.
'''
return dict((k, func(n)) for k, n in self.items())
def copy(self):
'''Return a copy of the graph.'''
return Graph(self.map(lambda n: n.copy()))
def reverse_copy(self):
'''Return a copy of the graph with the edges reversed.'''
return Graph(self.map(lambda n: n.reverse_copy()))
def edges(self):
'''Return an iterator over all of the edges in the graph.'''
def outgoing_edges(rqr, node):
if node.disjoint():
yield (rqr, None)
else:
for rqd in node:
yield (rqr, rqd)
return itertools.chain.from_iterable(outgoing_edges(*i)
for i in six.iteritems(self))
def __delitem__(self, key):
'''Delete the node given by the specified key from the graph.'''
node = self[key]
for src in node.required_by():
src_node = self[src]
if key in src_node:
src_node -= key
return super(Graph, self).__delitem__(key)
def __str__(self):
'''Convert the graph to a human-readable string.'''
pairs = ('%s: %s' % (str(k), str(v)) for k, v in six.iteritems(self))
text = '{%s}' % ', '.join(pairs)
return six.text_type(text)
@staticmethod
def toposort(graph):
'''
Return a topologically sorted iterator over a dependency graph.
This is a destructive operation for the graph.
'''
for iteration in six.moves.xrange(len(graph)):
for key, node in six.iteritems(graph):
if not node:
yield key
del graph[key]
break
else:
# There are nodes remaining, but none without
# dependencies: a cycle
raise CircularDependencyException(cycle=six.text_type(graph))
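# Illustrative sketch of toposort on a throwaway graph (keys are arbitrary).
# Note that toposort consumes the graph passed to it, which is why Dependencies
# below always hands it a copy:
#
#   g = Graph()
#   g['server'].requires('db')
#   g['db'].required_by('server')
#   list(Graph.toposort(g))  # -> ['db', 'server']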
@six.python_2_unicode_compatible
class Dependencies(object):
'''Helper class for calculating a dependency graph.'''
def __init__(self, edges=None):
'''
Initialise, optionally with a list of edges, in the form of
(requirer, required) tuples.
'''
edges = edges or []
self._graph = Graph()
for e in edges:
self += e
def __iadd__(self, edge):
'''Add another edge, in the form of a (requirer, required) tuple.'''
requirer, required = edge
if required is None:
# Just ensure the node is created by accessing the defaultdict
self._graph[requirer]
else:
self._graph[required].required_by(requirer)
self._graph[requirer].requires(required)
return self
def required_by(self, last):
'''
List the keys that require the specified node.
'''
if last not in self._graph:
raise KeyError
return self._graph[last].required_by()
def requires(self, target):
'''
        List the keys that the specified node requires.
'''
if target not in self._graph:
raise KeyError
return self._graph[target].requires()
def __getitem__(self, last):
'''
Return a partial dependency graph consisting of the specified node and
all those that require it only.
'''
if last not in self._graph:
raise KeyError
def get_edges(key):
def requirer_edges(rqr):
# Concatenate the dependency on the current node with the
                # recursively generated list
return itertools.chain([(rqr, key)], get_edges(rqr))
# Get the edge list for each node that requires the current node
edge_lists = six.moves.map(requirer_edges,
self._graph[key].required_by())
# Combine the lists into one long list
return itertools.chain.from_iterable(edge_lists)
if self._graph[last].stem():
# Nothing requires this, so just add the node itself
edges = [(last, None)]
else:
edges = get_edges(last)
return Dependencies(edges)
def leaves(self):
'''
Return an iterator over all of the leaf nodes in the graph.
'''
return (requirer for requirer, required in self._graph.items()
if not required)
def roots(self):
'''
Return an iterator over all of the root nodes in the graph.
'''
return (requirer for requirer, required in self.graph(
reverse=True).items() if not required)
def translate(self, transform):
'''
Translate all of the nodes using a transform function.
Returns a new Dependencies object.
'''
def transform_key(key):
return transform(key) if key is not None else None
edges = self._graph.edges()
return type(self)(tuple(map(transform_key, e)) for e in edges)
def __str__(self):
'''
Return a human-readable string representation of the dependency graph
'''
return six.text_type(self._graph)
def __repr__(self):
'''Return a consistent string representation of the object.'''
edge_reprs = list(repr(e) for e in self._graph.edges())
edge_reprs.sort()
text = 'Dependencies([%s])' % ', '.join(edge_reprs)
return text
def graph(self, reverse=False):
'''Return a copy of the underlying dependency graph.'''
if reverse:
return self._graph.reverse_copy()
else:
return self._graph.copy()
def __iter__(self):
'''Return a topologically sorted iterator.'''
return Graph.toposort(self.graph())
def __reversed__(self):
'''Return a reverse topologically sorted iterator.'''
return Graph.toposort(self.graph(reverse=True))
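# Illustrative end-to-end example (the resource names are made up). Edges are
# (requirer, required) tuples, so forward iteration yields dependencies before
# their dependents:
#
#   deps = Dependencies([('server', 'port'), ('port', 'network')])
#   list(deps)                         # -> ['network', 'port', 'server']
#   list(reversed(deps))               # -> ['server', 'port', 'network']
#   list(deps.requires('server'))      # -> ['port']
#   list(deps.required_by('network'))  # -> ['port']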
|
|
"""Component to allow running Python scripts."""
import datetime
import glob
import logging
import os
import time
from RestrictedPython import (
compile_restricted_exec,
limited_builtins,
safe_builtins,
utility_builtins,
)
from RestrictedPython.Eval import default_guarded_getitem
from RestrictedPython.Guards import (
full_write_guard,
guarded_iter_unpack_sequence,
guarded_unpack_sequence,
)
import voluptuous as vol
from homeassistant.const import CONF_DESCRIPTION, CONF_NAME, SERVICE_RELOAD
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.service import async_set_service_schema
from homeassistant.loader import bind_hass
from homeassistant.util import raise_if_invalid_filename
import homeassistant.util.dt as dt_util
from homeassistant.util.yaml.loader import load_yaml
_LOGGER = logging.getLogger(__name__)
DOMAIN = "python_script"
FOLDER = "python_scripts"
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema(dict)}, extra=vol.ALLOW_EXTRA)
ALLOWED_HASS = {"bus", "services", "states"}
ALLOWED_EVENTBUS = {"fire"}
ALLOWED_STATEMACHINE = {
"entity_ids",
"all",
"get",
"is_state",
"is_state_attr",
"remove",
"set",
}
ALLOWED_SERVICEREGISTRY = {"services", "has_service", "call"}
ALLOWED_TIME = {
"sleep",
"strftime",
"strptime",
"gmtime",
"localtime",
"ctime",
"time",
"mktime",
}
ALLOWED_DATETIME = {"date", "time", "datetime", "timedelta", "tzinfo"}
ALLOWED_DT_UTIL = {
"utcnow",
"now",
"as_utc",
"as_timestamp",
"as_local",
"utc_from_timestamp",
"start_of_local_day",
"parse_datetime",
"parse_date",
"get_age",
}
CONF_FIELDS = "fields"
class ScriptError(HomeAssistantError):
"""When a script error occurs."""
def setup(hass, config):
"""Initialize the Python script component."""
path = hass.config.path(FOLDER)
if not os.path.isdir(path):
_LOGGER.warning("Folder %s not found in configuration folder", FOLDER)
return False
discover_scripts(hass)
def reload_scripts_handler(call):
"""Handle reload service calls."""
discover_scripts(hass)
hass.services.register(DOMAIN, SERVICE_RELOAD, reload_scripts_handler)
return True
def discover_scripts(hass):
"""Discover python scripts in folder."""
path = hass.config.path(FOLDER)
if not os.path.isdir(path):
_LOGGER.warning("Folder %s not found in configuration folder", FOLDER)
return False
def python_script_service_handler(call):
"""Handle python script service calls."""
execute_script(hass, call.service, call.data)
existing = hass.services.services.get(DOMAIN, {}).keys()
for existing_service in existing:
if existing_service == SERVICE_RELOAD:
continue
hass.services.remove(DOMAIN, existing_service)
# Load user-provided service descriptions from python_scripts/services.yaml
services_yaml = os.path.join(path, "services.yaml")
if os.path.exists(services_yaml):
services_dict = load_yaml(services_yaml)
else:
services_dict = {}
for fil in glob.iglob(os.path.join(path, "*.py")):
name = os.path.splitext(os.path.basename(fil))[0]
hass.services.register(DOMAIN, name, python_script_service_handler)
service_desc = {
CONF_NAME: services_dict.get(name, {}).get("name", name),
CONF_DESCRIPTION: services_dict.get(name, {}).get("description", ""),
CONF_FIELDS: services_dict.get(name, {}).get("fields", {}),
}
async_set_service_schema(hass, DOMAIN, name, service_desc)
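# Illustrative layout of an optional python_scripts/services.yaml file (the
# script name and field are examples only, not shipped with this component):
#
#   turn_on_light:
#     name: Turn on light
#     description: Turns on a light with a transition.
#     fields:
#       entity_id:
#         description: Light to turn on.
#         example: light.kitchen
#
# Each top-level key is expected to match a <name>.py script in the
# python_scripts folder for its description to be attached to the service.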
@bind_hass
def execute_script(hass, name, data=None):
"""Execute a script."""
filename = f"{name}.py"
raise_if_invalid_filename(filename)
with open(hass.config.path(FOLDER, filename)) as fil:
source = fil.read()
execute(hass, filename, source, data)
@bind_hass
def execute(hass, filename, source, data=None):
"""Execute Python source."""
compiled = compile_restricted_exec(source, filename=filename)
if compiled.errors:
_LOGGER.error(
"Error loading script %s: %s", filename, ", ".join(compiled.errors)
)
return
if compiled.warnings:
_LOGGER.warning(
"Warning loading script %s: %s", filename, ", ".join(compiled.warnings)
)
def protected_getattr(obj, name, default=None):
"""Restricted method to get attributes."""
if name.startswith("async_"):
raise ScriptError("Not allowed to access async methods")
if (
obj is hass
and name not in ALLOWED_HASS
or obj is hass.bus
and name not in ALLOWED_EVENTBUS
or obj is hass.states
and name not in ALLOWED_STATEMACHINE
or obj is hass.services
and name not in ALLOWED_SERVICEREGISTRY
or obj is dt_util
and name not in ALLOWED_DT_UTIL
or obj is datetime
and name not in ALLOWED_DATETIME
or isinstance(obj, TimeWrapper)
and name not in ALLOWED_TIME
):
raise ScriptError(f"Not allowed to access {obj.__class__.__name__}.{name}")
return getattr(obj, name, default)
extra_builtins = {
"datetime": datetime,
"sorted": sorted,
"time": TimeWrapper(),
"dt_util": dt_util,
"min": min,
"max": max,
"sum": sum,
"any": any,
"all": all,
}
builtins = safe_builtins.copy()
builtins.update(utility_builtins)
builtins.update(limited_builtins)
builtins.update(extra_builtins)
logger = logging.getLogger(f"{__name__}.{filename}")
restricted_globals = {
"__builtins__": builtins,
"_print_": StubPrinter,
"_getattr_": protected_getattr,
"_write_": full_write_guard,
"_getiter_": iter,
"_getitem_": default_guarded_getitem,
"_iter_unpack_sequence_": guarded_iter_unpack_sequence,
"_unpack_sequence_": guarded_unpack_sequence,
"hass": hass,
"data": data or {},
"logger": logger,
}
try:
_LOGGER.info("Executing %s: %s", filename, data)
# pylint: disable=exec-used
exec(compiled.code, restricted_globals)
except ScriptError as err:
logger.error("Error executing script: %s", err)
except Exception as err: # pylint: disable=broad-except
logger.exception("Error executing script: %s", err)
class StubPrinter:
"""Class to handle printing inside scripts."""
def __init__(self, _getattr_):
"""Initialize our printer."""
def _call_print(self, *objects, **kwargs):
"""Print text."""
# pylint: disable=no-self-use
_LOGGER.warning("Don't use print() inside scripts. Use logger.info() instead")
class TimeWrapper:
"""Wrap the time module."""
# Class variable, only going to warn once per Home Assistant run
warned = False
# pylint: disable=no-self-use
def sleep(self, *args, **kwargs):
"""Sleep method that warns once."""
if not TimeWrapper.warned:
TimeWrapper.warned = True
_LOGGER.warning(
"Using time.sleep can reduce the performance of Home Assistant"
)
time.sleep(*args, **kwargs)
def __getattr__(self, attr):
"""Fetch an attribute from Time module."""
attribute = getattr(time, attr)
if callable(attribute):
def wrapper(*args, **kw):
"""Wrap to return callable method if callable."""
return attribute(*args, **kw)
return wrapper
return attribute
|
|
"""
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from urllib3 import PoolManager
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import logging
import os
import warnings
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
pass
class AppEnginePlatformError(HTTPError):
pass
class AppEngineManager(RequestMethods):
"""
Connection manager for Google App Engine sandbox applications.
This manager uses the URLFetch service directly instead of using the
emulated httplib, and is subject to URLFetch limitations as described in
the App Engine documentation `here
<https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Notably it will raise an :class:`AppEnginePlatformError` if:
* URLFetch is not available.
* If you attempt to use this on App Engine Flexible, as full socket
support is available.
* If a request size is more than 10 megabytes.
        * If a response size is more than 32 megabytes.
* If you use an unsupported request method such as OPTIONS.
Beyond those cases, it will raise normal urllib3 errors.
"""
def __init__(self, headers=None, retries=None, validate_certificate=True):
if not urlfetch:
raise AppEnginePlatformError(
"URLFetch is not available in this environment.")
if is_prod_appengine_mvms():
raise AppEnginePlatformError(
"Use normal urllib3.PoolManager instead of AppEngineManager"
"on Managed VMs, as using URLFetch is not necessary in "
"this environment.")
warnings.warn(
"urllib3 is using URLFetch on Google App Engine sandbox instead "
"of sockets. To use sockets directly instead of URLFetch see "
"https://urllib3.readthedocs.io/en/latest/contrib.html.",
AppEnginePlatformWarning)
RequestMethods.__init__(self, headers)
self.validate_certificate = validate_certificate
self.retries = retries or Retry.DEFAULT
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Return False to re-raise any potential exceptions
return False
def urlopen(self, method, url, body=None, headers=None,
retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
**response_kw):
retries = self._get_retries(retries, redirect)
try:
response = urlfetch.fetch(
url,
payload=body,
method=method,
headers=headers or {},
allow_truncated=False,
follow_redirects=(
redirect and
retries.redirect != 0 and
retries.total),
deadline=self._get_absolute_timeout(timeout),
validate_certificate=self.validate_certificate,
)
except urlfetch.DeadlineExceededError as e:
raise TimeoutError(self, e)
except urlfetch.InvalidURLError as e:
if 'too large' in str(e):
raise AppEnginePlatformError(
"URLFetch request too large, URLFetch only "
"supports requests up to 10mb in size.", e)
raise ProtocolError(e)
except urlfetch.DownloadError as e:
if 'Too many redirects' in str(e):
raise MaxRetryError(self, url, reason=e)
raise ProtocolError(e)
except urlfetch.ResponseTooLargeError as e:
raise AppEnginePlatformError(
"URLFetch response too large, URLFetch only supports"
"responses up to 32mb in size.", e)
except urlfetch.SSLCertificateError as e:
raise SSLError(e)
except urlfetch.InvalidMethodError as e:
raise AppEnginePlatformError(
"URLFetch does not support method: %s" % method, e)
http_response = self._urlfetch_response_to_http_response(
response, retries=retries, **response_kw)
# Check for redirect response
if (http_response.get_redirect_location() and
retries.raise_on_redirect and redirect):
raise MaxRetryError(self, url, "too many redirects")
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=http_response.status):
retries = retries.increment(
method, url, response=http_response, _pool=self)
log.info("Forced retry: %s", url)
retries.sleep()
return self.urlopen(
method, url,
body=body, headers=headers,
retries=retries, redirect=redirect,
timeout=timeout, **response_kw)
return http_response
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
if is_prod_appengine():
# Production GAE handles deflate encoding automatically, but does
# not remove the encoding header.
content_encoding = urlfetch_resp.headers.get('content-encoding')
if content_encoding == 'deflate':
del urlfetch_resp.headers['content-encoding']
transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
# We have a full response's content,
# so let's make sure we don't report ourselves as chunked data.
if transfer_encoding == 'chunked':
encodings = transfer_encoding.split(",")
encodings.remove('chunked')
urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
return HTTPResponse(
# In order for decoding to work, we must present the content as
# a file-like object.
body=BytesIO(urlfetch_resp.content),
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
**response_kw
)
def _get_absolute_timeout(self, timeout):
if timeout is Timeout.DEFAULT_TIMEOUT:
return None # Defer to URLFetch's default.
if isinstance(timeout, Timeout):
if timeout._read is not None or timeout._connect is not None:
warnings.warn(
"URLFetch does not support granular timeout settings, "
"reverting to total or default URLFetch timeout.",
AppEnginePlatformWarning)
return timeout.total
return timeout
def _get_retries(self, retries, redirect):
if not isinstance(retries, Retry):
retries = Retry.from_int(
retries, redirect=redirect, default=self.retries)
if retries.connect or retries.read or retries.redirect:
warnings.warn(
"URLFetch only supports total retries and does not "
"recognize connect, read, or redirect retry parameters.",
AppEnginePlatformWarning)
return retries
def is_appengine():
return (is_local_appengine() or
is_prod_appengine() or
is_prod_appengine_mvms())
def is_appengine_sandbox():
return is_appengine() and not is_prod_appengine_mvms()
def is_local_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Development/' in os.environ['SERVER_SOFTWARE'])
def is_prod_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
not is_prod_appengine_mvms())
def is_prod_appengine_mvms():
return os.environ.get('GAE_VM', False) == 'true'
|
|
"""Support for the Lovelace UI."""
import logging
import voluptuous as vol
from homeassistant.components import frontend
from homeassistant.config import async_hass_config_yaml, async_process_component_config
from homeassistant.const import CONF_FILENAME, CONF_MODE, CONF_RESOURCES
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import collection, config_validation as cv
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import async_get_integration
from . import dashboard, resources, websocket
from .const import (
CONF_ICON,
CONF_REQUIRE_ADMIN,
CONF_SHOW_IN_SIDEBAR,
CONF_TITLE,
CONF_URL_PATH,
DASHBOARD_BASE_CREATE_FIELDS,
DEFAULT_ICON,
DOMAIN,
MODE_STORAGE,
MODE_YAML,
RESOURCE_CREATE_FIELDS,
RESOURCE_RELOAD_SERVICE_SCHEMA,
RESOURCE_SCHEMA,
RESOURCE_UPDATE_FIELDS,
SERVICE_RELOAD_RESOURCES,
STORAGE_DASHBOARD_CREATE_FIELDS,
STORAGE_DASHBOARD_UPDATE_FIELDS,
url_slug,
)
from .system_health import system_health_info # noqa: F401
_LOGGER = logging.getLogger(__name__)
CONF_DASHBOARDS = "dashboards"
YAML_DASHBOARD_SCHEMA = vol.Schema(
{
**DASHBOARD_BASE_CREATE_FIELDS,
vol.Required(CONF_MODE): MODE_YAML,
vol.Required(CONF_FILENAME): cv.path,
}
)
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN, default={}): vol.Schema(
{
vol.Optional(CONF_MODE, default=MODE_STORAGE): vol.All(
vol.Lower, vol.In([MODE_YAML, MODE_STORAGE])
),
vol.Optional(CONF_DASHBOARDS): cv.schema_with_slug_keys(
YAML_DASHBOARD_SCHEMA,
slug_validator=url_slug,
),
vol.Optional(CONF_RESOURCES): [RESOURCE_SCHEMA],
}
)
},
extra=vol.ALLOW_EXTRA,
)
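# Illustrative configuration.yaml snippet that CONFIG_SCHEMA is meant to accept
# (dashboard slug, title, icon and filename are examples only; the exact field
# set comes from the DASHBOARD_BASE_CREATE_FIELDS constants):
#
#   lovelace:
#     mode: yaml
#     resources:
#       - url: /local/my-card.js
#         type: module
#     dashboards:
#       map-dashboard:
#         mode: yaml
#         title: Map
#         icon: mdi:map
#         show_in_sidebar: true
#         filename: map-dashboard.yaml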
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Lovelace commands."""
mode = config[DOMAIN][CONF_MODE]
yaml_resources = config[DOMAIN].get(CONF_RESOURCES)
frontend.async_register_built_in_panel(hass, DOMAIN, config={"mode": mode})
async def reload_resources_service_handler(service_call: ServiceCall) -> None:
"""Reload yaml resources."""
try:
conf = await async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(err)
return
integration = await async_get_integration(hass, DOMAIN)
config = await async_process_component_config(hass, conf, integration)
resource_collection = await create_yaml_resource_col(
hass, config[DOMAIN].get(CONF_RESOURCES)
)
hass.data[DOMAIN]["resources"] = resource_collection
if mode == MODE_YAML:
default_config = dashboard.LovelaceYAML(hass, None, None)
resource_collection = await create_yaml_resource_col(hass, yaml_resources)
async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD_RESOURCES,
reload_resources_service_handler,
schema=RESOURCE_RELOAD_SERVICE_SCHEMA,
)
else:
default_config = dashboard.LovelaceStorage(hass, None)
if yaml_resources is not None:
_LOGGER.warning(
"Lovelace is running in storage mode. Define resources via user interface"
)
resource_collection = resources.ResourceStorageCollection(hass, default_config)
collection.StorageCollectionWebsocket(
resource_collection,
"lovelace/resources",
"resource",
RESOURCE_CREATE_FIELDS,
RESOURCE_UPDATE_FIELDS,
).async_setup(hass, create_list=False)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_config
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_save_config
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_delete_config
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_resources
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_dashboards
)
hass.data[DOMAIN] = {
# We store a dictionary mapping url_path: config. None is the default.
"dashboards": {None: default_config},
"resources": resource_collection,
"yaml_dashboards": config[DOMAIN].get(CONF_DASHBOARDS, {}),
}
if hass.config.safe_mode:
return True
async def storage_dashboard_changed(change_type, item_id, item):
"""Handle a storage dashboard change."""
url_path = item[CONF_URL_PATH]
if change_type == collection.CHANGE_REMOVED:
frontend.async_remove_panel(hass, url_path)
await hass.data[DOMAIN]["dashboards"].pop(url_path).async_delete()
return
if change_type == collection.CHANGE_ADDED:
existing = hass.data[DOMAIN]["dashboards"].get(url_path)
if existing:
_LOGGER.warning(
"Cannot register panel at %s, it is already defined in %s",
url_path,
existing,
)
return
hass.data[DOMAIN]["dashboards"][url_path] = dashboard.LovelaceStorage(
hass, item
)
update = False
else:
hass.data[DOMAIN]["dashboards"][url_path].config = item
update = True
try:
_register_panel(hass, url_path, MODE_STORAGE, item, update)
except ValueError:
_LOGGER.warning("Failed to %s panel %s from storage", change_type, url_path)
# Process YAML dashboards
for url_path, dashboard_conf in hass.data[DOMAIN]["yaml_dashboards"].items():
# For now always mode=yaml
config = dashboard.LovelaceYAML(hass, url_path, dashboard_conf)
hass.data[DOMAIN]["dashboards"][url_path] = config
try:
_register_panel(hass, url_path, MODE_YAML, dashboard_conf, False)
except ValueError:
_LOGGER.warning("Panel url path %s is not unique", url_path)
# Process storage dashboards
dashboards_collection = dashboard.DashboardsCollection(hass)
dashboards_collection.async_add_listener(storage_dashboard_changed)
await dashboards_collection.async_load()
collection.StorageCollectionWebsocket(
dashboards_collection,
"lovelace/dashboards",
"dashboard",
STORAGE_DASHBOARD_CREATE_FIELDS,
STORAGE_DASHBOARD_UPDATE_FIELDS,
).async_setup(hass, create_list=False)
return True
async def create_yaml_resource_col(hass, yaml_resources):
"""Create yaml resources collection."""
if yaml_resources is None:
default_config = dashboard.LovelaceYAML(hass, None, None)
try:
ll_conf = await default_config.async_load(False)
except HomeAssistantError:
pass
else:
if CONF_RESOURCES in ll_conf:
_LOGGER.warning(
"Resources need to be specified in your configuration.yaml. Please see the docs"
)
yaml_resources = ll_conf[CONF_RESOURCES]
return resources.ResourceYAMLCollection(yaml_resources or [])
@callback
def _register_panel(hass, url_path, mode, config, update):
"""Register a panel."""
kwargs = {
"frontend_url_path": url_path,
"require_admin": config[CONF_REQUIRE_ADMIN],
"config": {"mode": mode},
"update": update,
}
if config[CONF_SHOW_IN_SIDEBAR]:
kwargs["sidebar_title"] = config[CONF_TITLE]
kwargs["sidebar_icon"] = config.get(CONF_ICON, DEFAULT_ICON)
frontend.async_register_built_in_panel(hass, DOMAIN, **kwargs)
|
|
"""
Module for controlling deliveries of samples and projects to GRUS
"""
import glob
import time
import requests
import datetime
import os
import logging
import json
import subprocess
import sys
import re
import shutil
from dateutil.relativedelta import relativedelta
from ngi_pipeline.database.classes import CharonSession
from taca.utils.filesystem import do_copy, create_folder
from taca.utils.config import CONFIG
from taca.utils.statusdb import StatusdbSession, ProjectSummaryConnection
from .deliver import ProjectDeliverer, SampleDeliverer, DelivererInterruptedError
from ..utils.database import DatabaseError
from six.moves import input
logger = logging.getLogger(__name__)
def proceed_or_not(question):
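    """Prompt the user with a yes/no question on stdin and return True for yes, False for no."""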
yes = set(['yes', 'y', 'ye'])
no = set(['no', 'n'])
sys.stdout.write("{}".format(question))
while True:
choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
sys.stdout.write("Please respond with 'yes' or 'no'")
def check_mover_version():
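    """Run 'moverinfo --version' and verify that exactly mover version 1.0.0 is loaded; return True or False."""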
cmd = ['moverinfo', '--version']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
    m = re.search(r'.* version (\d\.\d\.\d)', output)
    if not m:
        logger.error("Problem trying to identify mover version. Failed!")
return False
if m.group(1) != "1.0.0":
logger.error("mover version is {}, only allowed version is 1.0.0. Please run module load mover/1.0.0 and retry".format(m.group(1)))
return False
    return True  # if we get here this is mover/1.0.0, so we are fine
class GrusProjectDeliverer(ProjectDeliverer):
""" This object takes care of delivering project samples to castor's wharf.
"""
def __init__(self, projectid=None, sampleid=None,
pi_email=None, sensitive=True,
hard_stage_only=False, add_user=None,
fcid=None, **kwargs):
super(GrusProjectDeliverer, self).__init__(
projectid,
sampleid,
**kwargs
)
self.stagingpathhard = getattr(self, 'stagingpathhard', None)
if self.stagingpathhard is None:
raise AttributeError("stagingpathhard is required when delivering to GRUS")
self.config_snic = CONFIG.get('snic', None)
if self.config_snic is None:
raise AttributeError("snic confoguration is needed delivering to GRUS (snic_api_url, snic_api_user, snic_api_password")
self.config_statusdb = CONFIG.get('statusdb', None)
if self.config_statusdb is None:
raise AttributeError("statusdb configuration is needed delivering to GRUS (url, username, password, port")
        self.orderportal = CONFIG.get('order_portal', None)  # no need to raise an exception here; this has already been checked and monitoring does not need it
if self.orderportal:
self._set_pi_details(pi_email) # set PI email and SNIC id
self._set_other_member_details(add_user, CONFIG.get('add_project_owner', False)) # set SNIC id for other project members
self.sensitive = sensitive
self.hard_stage_only = hard_stage_only
self.fcid = fcid
def get_delivery_status(self, dbentry=None):
""" Returns the delivery status for this sample. If a sampleentry
dict is supplied, it will be used instead of fethcing from database
:params sampleentry: a database sample entry to use instead of
fetching from db
:returns: the delivery status of this sample as a string
"""
dbentry = dbentry or self.db_entry()
if dbentry.get('delivery_token'):
if dbentry.get('delivery_token') not in ['NO-TOKEN', 'not_under_delivery'] :
return 'IN_PROGRESS' #it means that at least some samples are under delivery
if dbentry.get('delivery_status'):
if dbentry.get('delivery_status') == 'DELIVERED':
return 'DELIVERED' #it means that the project has been marked as delivered
if dbentry.get('delivery_projects'):
return 'PARTIAL' #it means that the project underwent a delivery, but not for all the samples
return 'NOT_DELIVERED' #last possible case is that the project is not delivered
def check_mover_delivery_status(self):
""" This function checks is project is under delivery. If so it waits until projects is delivered or a certain threshold is met
"""
#first thing check that we are using mover 1.0.0
if not check_mover_version():
logger.error("Not delivering becouse wrong mover version detected")
return False
charon_status = self.get_delivery_status()
# we don't care if delivery is not in progress
if charon_status != 'IN_PROGRESS':
logger.info("Project {} has no delivery token. Project is not being delivered at the moment".format(self.projectid))
return
# if it's 'IN_PROGRESS', checking moverinfo
delivery_token = self.db_entry().get('delivery_token')
logger.info("Project {} under delivery. Delivery token is {}. Starting monitoring:".format(self.projectid, delivery_token))
delivery_status = 'IN_PROGRESS'
not_monitoring = False
max_delivery_time = relativedelta(days=7)
monitoring_start = datetime.datetime.now()
while ( not not_monitoring ):
try:
cmd = ['moverinfo', '-i', delivery_token]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
except Exception as e:
logger.error('Cannot get the delivery status for project {}'.format(self.projectid))
# write Traceback to the log file
logger.exception(e)
# we do not raise, but exit(1). Traceback will be written to log.
exit(1)
else:
                # moverinfo output with option -i can be: InProgress, Accepted, Failed or Delivered
mover_status = output.split(':')[0]
if mover_status == 'Delivered':
# check the filesystem anyway
if os.path.exists(self.expand_path(self.stagingpathhard)):
                        logger.error('Delivery {} for project {} reported as delivered but project folder still found in DELIVERY_HARD. Failing delivery.'.format(delivery_token, self.projectid))
delivery_status = 'FAILED'
else:
logger.info("Project {} succefully delivered. Delivery token is {}.".format(self.projectid, delivery_token))
delivery_status = 'DELIVERED'
not_monitoring = True #stop the monitoring, it is either failed or delivered
continue
else:
#check for how long time delivery has been going on
if self.db_entry().get('delivery_started'):
delivery_started = self.db_entry().get('delivery_started')
else:
                        delivery_started = monitoring_start  # the first time we checked the status, not necessarily when the delivery actually began
now = datetime.datetime.now()
if now - max_delivery_time > delivery_started:
                        logger.error('Delivery {} for project {} has been ongoing for more than 7 days. Please investigate. The project status will be reset'.format(delivery_token, self.projectid))
delivery_status = 'FAILED'
not_monitoring = True #stop the monitoring, it is taking too long
continue
if mover_status == 'Accepted':
logger.info("Project {} under delivery. Status for delivery-token {} is : {}".format(self.projectid, delivery_token, mover_status))
elif mover_status == 'Failed':
logger.warn("Project {} under delivery (attention mover returned {}). Status for delivery-token {} is : {}".format(self.projectid, mover_status, delivery_token, mover_status))
elif mover_status == 'InProgress':
                        # InProgress is an expected status while mover is still transferring; keep monitoring
logger.info("Project {} under delivery. Status for delivery-token {} is : {}".format(self.projectid, delivery_token, mover_status))
else:
logger.warn("Project {} under delivery. Unexpected status-delivery returned by mover for delivery-token {}: {}".format(self.projectid, delivery_token, mover_status))
time.sleep(900) #sleep for 15 minutes and then check again the status
        # we get here only when not_monitoring is True, i.e. the delivery was reported done/failed or it has been ongoing for more than 7 days
if delivery_status == 'DELIVERED' or delivery_status == 'FAILED':
#fetch all samples that were under delivery
in_progress_samples = self.get_samples_from_charon(delivery_status="IN_PROGRESS")
# now update them
for sample_id in in_progress_samples:
try:
sample_deliverer = GrusSampleDeliverer(self.projectid, sample_id)
sample_deliverer.update_delivery_status(status=delivery_status)
except Exception as e:
logger.error('Sample {}: Problems in setting sample status on charon. Error: {}'.format(sample_id, e))
logger.exception(e)
#now reset delivery
self.delete_delivery_token_in_charon()
            # now check: if all samples in Charon are DELIVERED or ABORTED, then the whole project is DELIVERED
all_samples_delivered = True
for sample_id in self.get_samples_from_charon(delivery_status=None):
try:
sample_deliverer = GrusSampleDeliverer(self.projectid, sample_id)
if sample_deliverer.get_sample_status() == 'ABORTED':
continue
if sample_deliverer.get_delivery_status() != 'DELIVERED':
all_samples_delivered = False
except Exception as e:
                    logger.error('Sample {}: Problems fetching sample status from Charon. Error: {}'.format(sample_id, e))
logger.exception(e)
if all_samples_delivered:
self.update_delivery_status(status=delivery_status)
def deliver_project(self):
""" Deliver all samples in a project to grus
:returns: True if all samples were delivered successfully, False if
any sample was not properly delivered or ready to be delivered
"""
#first thing check that we are using mover 1.0.0
if not check_mover_version():
logger.error("Not delivering because wrong mover version detected")
return False
# moved this part from constructor, as we can create an object without running the delivery (e.g. to check_delivery_status)
#check if the project directory already exists, if so abort
soft_stagepath = self.expand_path(self.stagingpath)
hard_stagepath = self.expand_path(self.stagingpathhard)
if os.path.exists(hard_stagepath):
logger.error("In {} found already folder {}. No multiple mover deliveries are allowed".format(
hard_stagepath, self.projectid))
raise DelivererInterruptedError("Hard Staged Folder already present")
#check that this project is not under delivery with mover already in this case stop delivery
if self.get_delivery_status() == 'DELIVERED' \
and not self.force:
logger.info("{} has already been delivered. This project will not be delivered again this time.".format(str(self)))
return True
elif self.get_delivery_status() == 'IN_PROGRESS':
logger.error("Project {} is already under delivery. No multiple mover deliveries are allowed".format(
self.projectid))
raise DelivererInterruptedError("Project already under delivery with Mover")
elif self.get_delivery_status() == 'PARTIAL':
logger.warning("{} has already been partially delivered. Please confirm you want to proceed.".format(str(self)))
if proceed_or_not("Do you want to proceed (yes/no): "):
logger.info("{} has already been partially delivered. User confirmed to proceed.".format(str(self)))
else:
logger.error("{} has already been partially delivered. User decided to not proceed.".format(str(self)))
return False
#now check if the sensitive flag has been set in the correct way
question = "This project has been marked as SENSITIVE (option --sensitive). Do you want to proceed with delivery? "
if not self.sensitive:
question = "This project has been marked as NON-SENSITIVE (option --no-sensitive). Do you want to proceed with delivery? "
if proceed_or_not(question):
logger.info("Delivering {} to GRUS with mover. Project marked as SENSITIVE={}".format(str(self), self.sensitive))
else:
logger.error("{} delivery has been aborted. Sensitive level was WRONG.".format(str(self)))
return False
#now start with the real work
status = True
# connect to charon, return list of sample objects that have been staged
try:
samples_to_deliver = self.get_samples_from_charon(delivery_status="STAGED")
except Exception as e:
logger.error("Cannot get samples from Charon. Error says: {}".format(str(e)))
logger.exception(e)
raise e
if len(samples_to_deliver) == 0:
logger.warning('No staged samples found in Charon')
raise AssertionError('No staged samples found in Charon')
# collect other files (not samples) if any to include in the hard staging
misc_to_deliver = [itm for itm in os.listdir(soft_stagepath) if os.path.splitext(itm)[0] not in samples_to_deliver]
question = "\nProject stagepath: {}\nSamples: {}\nMiscellaneous: {}\n\nProceed with delivery ? "
question = question.format(soft_stagepath, ", ".join(samples_to_deliver), ", ".join(misc_to_deliver))
if proceed_or_not(question):
logger.info("Proceeding with delivery of {}".format(str(self)))
#lock the delivery by creating the folder
create_folder(hard_stagepath)
else:
logger.error("Aborting delivery for {}, remove unwanted files and try again".format(str(self)))
return False
hard_staged_samples = []
for sample_id in samples_to_deliver:
try:
sample_deliverer = GrusSampleDeliverer(self.projectid, sample_id)
sample_deliverer.deliver_sample()
except Exception as e:
logger.error('Sample {} has not been hard staged. Error says: {}'.format(sample_id, e))
logger.exception(e)
raise e
else:
hard_staged_samples.append(sample_id)
if len(samples_to_deliver) != len(hard_staged_samples):
            # Something unexpected happened, terminate
logger.warning('Not all the samples have been hard staged. Terminating')
raise AssertionError('len(samples_to_deliver) != len(hard_staged_samples): {} != {}'.format(len(samples_to_deliver),
len(hard_staged_samples)))
hard_staged_misc = []
for itm in misc_to_deliver:
src_misc = os.path.join(soft_stagepath, itm)
dst_misc = os.path.join(hard_stagepath, itm)
try:
if os.path.isdir(src_misc):
shutil.copytree(src_misc, dst_misc)
else:
shutil.copy(src_misc, dst_misc)
hard_staged_misc.append(itm)
except Exception as e:
logger.error('Miscellaneous file {} has not been hard staged for project {}. Error says: {}'.format(itm, self.projectid, e))
logger.exception(e)
raise e
if len(misc_to_deliver) != len(hard_staged_misc):
            # Something unexpected happened, terminate
logger.warning('Not all the Miscellaneous files have been hard staged for project {}. Terminating'.format(self.projectid))
raise AssertionError('len(misc_to_deliver) != len(hard_staged_misc): {} != {}'.format(len(misc_to_deliver),
len(hard_staged_misc)))
# create a delivery project id
supr_name_of_delivery = ''
try:
delivery_project_info = self._create_delivery_project()
supr_name_of_delivery = delivery_project_info['name']
logger.info("Delivery project for project {} has been created. Delivery IDis {}".format(self.projectid, supr_name_of_delivery))
except Exception as e:
logger.error('Cannot create delivery project. Error says: {}'.format(e))
logger.exception(e)
delivery_token = self.do_delivery(supr_name_of_delivery) # instead of to_outbox
        # at this point we have delivery_token and supr_name_of_delivery, so update the project and sample fields
if delivery_token:
#memorise the delivery token used to check if project is under delivery
self.save_delivery_token_in_charon(delivery_token)
            # memorise the delivery project so we know to how many delivery projects each NGI project has been sent
self.add_supr_name_delivery_in_charon(supr_name_of_delivery)
self.add_supr_name_delivery_in_statusdb(supr_name_of_delivery)
logger.info("Delivery token for project {}, delivery project {} is {}".format(self.projectid,
supr_name_of_delivery,
delivery_token))
for sample_id in samples_to_deliver:
try:
sample_deliverer = GrusSampleDeliverer(self.projectid, sample_id)
sample_deliverer.save_delivery_token_in_charon(delivery_token)
sample_deliverer.add_supr_name_delivery_in_charon(supr_name_of_delivery)
except Exception as e:
                    logger.error('Failed to save sample information for sample {}. Error says: {}'.format(sample_id, e))
logger.exception(e)
else:
logger.error('Delivery project for project {} has not been created'.format(self.projectid))
status = False
return status
def deliver_run_folder(self):
'''Hard stages run folder and initiates delivery
'''
#stage the data
dst = self.expand_path(self.stagingpathhard)
path_to_data = self.expand_path(self.datapath)
runfolder_archive = os.path.join(path_to_data, self.fcid + ".tar.gz")
runfolder_md5file = runfolder_archive + ".md5"
question = "This project has been marked as SENSITIVE (option --sensitive). Do you want to proceed with delivery? "
if not self.sensitive:
question = "This project has been marked as NON-SENSITIVE (option --no-sensitive). Do you want to proceed with delivery? "
if proceed_or_not(question):
logger.info("Delivering {} to GRUS with mover. Project marked as SENSITIVE={}".format(str(self), self.sensitive))
else:
logger.error("{} delivery has been aborted. Sensitive level was WRONG.".format(str(self)))
return False
status = True
create_folder(dst)
try:
shutil.copy(runfolder_archive, dst)
shutil.copy(runfolder_md5file, dst)
logger.info("Copying files {} and {} to {}".format(runfolder_archive, runfolder_md5file, dst))
        except IOError as e:
            logger.error("Unable to copy files to {}. Please check that the files exist and that the filenames match the flowcell ID.".format(dst))
            logger.exception(e)
            return False
delivery_id = ''
try:
delivery_project_info = self._create_delivery_project()
delivery_id = delivery_project_info['name']
logger.info("Delivery project for project {} has been created. Delivery IDis {}".format(self.projectid, delivery_id))
except Exception as e:
logger.error('Cannot create delivery project. Error says: {}'.format(e))
logger.exception(e)
#invoke mover
delivery_token = self.do_delivery(delivery_id)
if delivery_token:
logger.info("Delivery token for project {}, delivery project {} is {}".format(self.projectid,
delivery_id,
delivery_token))
else:
logger.error('Delivery project for project {} has not been created'.format(self.projectid))
status = False
return status
def save_delivery_token_in_charon(self, delivery_token):
'''Updates delivery_token in Charon at project level
'''
charon_session = CharonSession()
charon_session.project_update(self.projectid, delivery_token=delivery_token)
def delete_delivery_token_in_charon(self):
'''Removes delivery_token from Charon upon successful delivery
'''
charon_session = CharonSession()
charon_session.project_update(self.projectid, delivery_token='NO-TOKEN')
def add_supr_name_delivery_in_charon(self, supr_name_of_delivery):
'''Updates delivery_projects in Charon at project level
'''
charon_session = CharonSession()
try:
#fetch the project
project_charon = charon_session.project_get(self.projectid)
delivery_projects = project_charon['delivery_projects']
if supr_name_of_delivery not in delivery_projects:
delivery_projects.append(supr_name_of_delivery)
charon_session.project_update(self.projectid, delivery_projects=delivery_projects)
logger.info('Charon delivery_projects for project {} updated with value {}'.format(self.projectid, supr_name_of_delivery))
else:
logger.warn('Charon delivery_projects for project {} not updated with value {} because the value was already present'.format(self.projectid, supr_name_of_delivery))
except Exception as e:
logger.error('Failed to update delivery_projects in charon while delivering {}. Error says: {}'.format(self.projectid, e))
logger.exception(e)
def add_supr_name_delivery_in_statusdb(self, supr_name_of_delivery):
'''Updates delivery_projects in StatusDB at project level
'''
save_meta_info = getattr(self, 'save_meta_info', False)
if not save_meta_info:
return
status_db = ProjectSummaryConnection(self.config_statusdb)
project_page = status_db.get_entry(self.projectid, use_id_view=True)
dprojs = []
if 'delivery_projects' in project_page:
dprojs = project_page['delivery_projects']
dprojs.append(supr_name_of_delivery)
project_page['delivery_projects'] = dprojs
try:
status_db.save_db_doc(project_page)
logger.info('Delivery_projects for project {} updated with value {} in statusdb'.format(self.projectid, supr_name_of_delivery))
except Exception as e:
logger.error('Failed to update delivery_projects in statusdb while delivering {}. Error says: {}'.format(self.projectid, e))
logger.exception(e)
def do_delivery(self, supr_name_of_delivery):
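        """Change group ownership of the hard-staged folder and hand it over to mover via to_outbox.
        Returns the delivery token printed by to_outbox, or a placeholder when hard_stage_only is set.
        """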
# this one returns error : "265 is non-existing at /usr/local/bin/to_outbox line 214". (265 is delivery_project_id, created via api)
# or: id=P6968-ngi-sw-1488209917 Error: receiver 274 does not exist or has expired.
hard_stage = self.expand_path(self.stagingpathhard)
        # need to change the group of all files
os.chown(hard_stage, -1, 47537)
for root, dirs, files in os.walk(hard_stage):
for dir in dirs:
dir_path = os.path.join(root, dir)
os.chown(dir_path, -1, 47537) #gr_id is the one of ngi2016003
for file in files:
fname = os.path.join(root, file)
os.chown(fname, -1, 47537)
cmd = ['to_outbox', hard_stage, supr_name_of_delivery]
if self.hard_stage_only:
logger.warning("to_mover command not executed, only hard-staging done. Do what you need to do and then run: {}".format(" ".join(cmd)))
return "manually-set-up"
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
        except subprocess.CalledProcessError as e:
            logger.error('to_outbox failed while delivering {} to {}'.format(hard_stage, supr_name_of_delivery))
            logger.exception(e)
            return None
delivery_token = output.rstrip()
return delivery_token
def get_samples_from_charon(self, delivery_status='STAGED'):
"""Takes as input a delivery status and return all samples with that delivery status
"""
charon_session = CharonSession()
result = charon_session.project_get_samples(self.projectid)
samples = result.get('samples')
if samples is None:
raise AssertionError('CharonSession returned no results for project {}'.format(self.projectid))
samples_of_interest = []
for sample in samples:
sample_id = sample.get('sampleid')
charon_delivery_status = sample.get('delivery_status')
if charon_delivery_status == delivery_status or delivery_status is None:
samples_of_interest.append(sample_id)
return samples_of_interest
def _create_delivery_project(self):
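        """Create a delivery project in SUPR via the SNIC API and return the parsed JSON response."""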
create_project_url = '{}/ngi_delivery/project/create/'.format(self.config_snic.get('snic_api_url'))
user = self.config_snic.get('snic_api_user')
password = self.config_snic.get('snic_api_password')
supr_date_format = '%Y-%m-%d'
today = datetime.date.today()
three_months_from_now = (today + relativedelta(months=+3))
data = {
'ngi_project_name': self.projectid,
'title': "DELIVERY_{}_{}".format(self.projectid, today.strftime(supr_date_format)),
'pi_id': self.pi_snic_id,
'start_date': today.strftime(supr_date_format),
'end_date': three_months_from_now.strftime(supr_date_format),
'continuation_name': '',
# You can use this field to allocate the size of the delivery
# 'allocated': size_of_delivery,
# This field can be used to add any data you like
'api_opaque_data': '',
'ngi_ready': False,
'ngi_delivery_status': '',
'ngi_sensitive_data': self.sensitive,
'member_ids': self.other_member_snic_ids
}
response = requests.post(create_project_url, data=json.dumps(data), auth=(user, password))
if response.status_code != 200:
raise AssertionError("API returned status code {}. Response: {}. URL: {}".format(response.status_code, response.content, create_project_url))
result = json.loads(response.content)
return result
def _set_pi_details(self, given_pi_email=None):
"""
Set PI email address and PI SNIC ID using PI email
"""
self.pi_email, self.pi_snic_id = (None, None)
# try getting PI email
if given_pi_email:
logger.warning("PI email for project {} specified by user: {}".format(self.projectid, given_pi_email))
self.pi_email = given_pi_email
else:
try:
self.pi_email = self._get_order_detail()['fields']['project_pi_email']
logger.info("PI email for project {} found: {}".format(self.projectid, self.pi_email))
except Exception as e:
logger.error("Cannot fetch pi_email from StatusDB. Error says: {}".format(str(e)))
raise e
# try getting PI SNIC ID
try:
self.pi_snic_id = self._get_user_snic_id(self.pi_email)
logger.info("SNIC PI-id for delivering of project {} is {}".format(self.projectid, self.pi_snic_id))
except Exception as e:
logger.error("Cannot fetch PI SNIC id using snic API. Error says: {}".format(str(e)))
raise e
    def _set_other_member_details(self, other_member_emails=None, include_owner=False):
"""
        Set other contact details if available; this is not mandatory, so
        the method will not raise an error if it cannot find any contact
"""
        other_member_emails = other_member_emails or []
        self.other_member_snic_ids = []
# try getting appropriate contact emails
try:
prj_order = self._get_order_detail()
if include_owner:
owner_email = prj_order.get('owner', {}).get('email')
if owner_email and owner_email != self.pi_email and owner_email not in other_member_emails:
other_member_emails.append(owner_email)
binfo_email = prj_order.get('fields', {}).get('project_bx_email')
if binfo_email and binfo_email != self.pi_email and binfo_email not in other_member_emails:
other_member_emails.append(binfo_email)
except (AssertionError, ValueError) as e:
pass # nothing to worry, just move on
if other_member_emails:
logger.info("Other appropriate contacts were found, they will be added to GRUS delivery project: {}".format(", ".join(other_member_emails)))
# try getting snic id for other emails if any
for uemail in other_member_emails:
try:
self.other_member_snic_ids.append(self._get_user_snic_id(uemail))
except:
logger.warning("Was not able to get SNIC id for email {}, so that user will not be included in the GRUS project".format(uemail))
def _get_user_snic_id(self, uemail):
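        """Look up a user by email via the SNIC API and return the matching SUPR person id."""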
user = self.config_snic.get('snic_api_user')
password = self.config_snic.get('snic_api_password')
get_user_url = '{}/person/search/'.format(self.config_snic.get('snic_api_url'))
params = {'email_i': uemail}
response = requests.get(get_user_url, params=params, auth=(user, password))
if response.status_code != 200:
raise AssertionError("Unexpected code returned when trying to get SNIC id for email: {}. Response was: {}".format(uemail, response.content))
result = json.loads(response.content)
matches = result.get("matches")
if matches is None:
raise AssertionError('The response returned unexpected data')
if len(matches) < 1:
raise AssertionError("There was no hit in SUPR for email: {}".format(uemail))
if len(matches) > 1:
raise AssertionError("There were more than one hit in SUPR for email: {}".format(uemail))
return matches[0].get("id")
def _get_order_detail(self):
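        """Resolve the project's order portal id via StatusDB and fetch the order details from the Order Portal API."""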
status_db = StatusdbSession(self.config_statusdb)
projects_db = status_db.connection['projects']
view = projects_db.view('order_portal/ProjectID_to_PortalID')
rows = view[self.projectid].rows
if len(rows) < 1:
raise AssertionError("Project {} not found in StatusDB".format(self.projectid))
if len(rows) > 1:
raise AssertionError('Project {} has more than one entry in orderportal_db'.format(self.projectid))
portal_id = rows[0].value
#now get the PI email from order portal API
get_project_url = '{}/v1/order/{}'.format(self.orderportal.get('orderportal_api_url'), portal_id)
headers = {'X-OrderPortal-API-key': self.orderportal.get('orderportal_api_token')}
response = requests.get(get_project_url, headers=headers)
if response.status_code != 200:
raise AssertionError("Status code returned when trying to get PI email from project in order portal: {} was not 200. Response was: {}".format(portal_id, response.content))
return json.loads(response.content)
class GrusSampleDeliverer(SampleDeliverer):
"""
A class for handling sample deliveries to castor
"""
def __init__(self, projectid=None, sampleid=None, **kwargs):
super(GrusSampleDeliverer, self).__init__(
projectid,
sampleid,
**kwargs)
def deliver_sample(self, sampleentry=None):
""" Deliver a sample to the destination specified via command line of on Charon.
Will check if the sample has already been delivered and should not
be delivered again or if the sample is not yet ready to be delivered.
Delivers only samples that have been staged.
:params sampleentry: a database sample entry to use for delivery,
be very careful with caching the database entries though since
concurrent processes can update the database at any time
:returns: True if sample was successfully delivered or was previously
delivered, False if sample was not yet ready to be delivered
:raises taca_ngi_pipeline.utils.database.DatabaseError: if an entry corresponding to this
sample could not be found in the database
:raises DelivererReplaceError: if a previous delivery of this sample
has taken place but should be replaced
:raises DelivererError: if the delivery failed
"""
# propagate raised errors upwards, they should trigger notification to operator
# try:
logger.info("Delivering {} to GRUS with MOVER".format(str(self)))
hard_stagepath = self.expand_path(self.stagingpathhard)
soft_stagepath = self.expand_path(self.stagingpath)
try:
logger.info("Trying to deliver {} to GRUS with MOVER".format(str(self)))
hard_stagepath = self.expand_path(self.stagingpathhard)
soft_stagepath = self.expand_path(self.stagingpath)
try:
if self.get_delivery_status(sampleentry) != 'STAGED':
logger.info("{} has not been staged and will not be delivered".format(str(self)))
return False
except DatabaseError as e:
logger.error("error '{}' occurred during delivery of {}".format(str(e), str(self)))
logger.exception(e)
raise(e)
            # at this point hard-copy (dereferencing symlinks) the soft-staged folder
self.update_delivery_status(status="IN_PROGRESS")
self.do_delivery()
        # in case of failure set the status back to STAGED
except DelivererInterruptedError as e:
self.update_delivery_status(status="STAGED")
logger.exception(e)
raise(e)
except Exception as e:
self.update_delivery_status(status="STAGED")
logger.exception(e)
raise(e)
def save_delivery_token_in_charon(self, delivery_token):
'''Updates delivery_token in Charon at sample level
'''
charon_session = CharonSession()
charon_session.sample_update(self.projectid, self.sampleid, delivery_token=delivery_token)
def add_supr_name_delivery_in_charon(self, supr_name_of_delivery):
        '''Updates delivery_projects in Charon at sample level
'''
charon_session = CharonSession()
try:
#fetch the project
sample_charon = charon_session.sample_get(self.projectid, self.sampleid)
delivery_projects = sample_charon['delivery_projects']
            if supr_name_of_delivery not in delivery_projects:
delivery_projects.append(supr_name_of_delivery)
charon_session.sample_update(self.projectid, self.sampleid, delivery_projects=delivery_projects)
logger.info('Charon delivery_projects for sample {} updated with value {}'.format(self.sampleid, supr_name_of_delivery))
else:
logger.warn('Charon delivery_projects for sample {} not updated with value {} because the value was already present'.format(self.sampleid, supr_name_of_delivery))
except Exception as e:
logger.error('Failed to update delivery_projects in charon while delivering {}. Error says: {}'.format(self.sampleid, e))
logger.exception(e)
def do_delivery(self):
""" Creating a hard copy of staged data
"""
logger.info("Creating hard copy of sample {}".format(self.sampleid))
# join stage dir with sample dir
source_dir = os.path.join(self.expand_path(self.stagingpath), self.sampleid)
destination_dir = os.path.join(self.expand_path(self.stagingpathhard), self.sampleid)
# destination must NOT exist
do_copy(source_dir, destination_dir)
#now copy md5 and other files
for file in glob.glob("{}.*".format(source_dir)):
shutil.copy(file, self.expand_path(self.stagingpathhard))
logger.info("Sample {} has been hard staged to {}".format(self.sampleid, destination_dir))
return
|
|
from collections import defaultdict
import numpy as np
import param
from bokeh.models import (
CategoricalColorMapper, CustomJS, FactorRange, Range1d, Whisker
)
from bokeh.models.tools import BoxSelectTool
from bokeh.transform import jitter
from ...core.data import Dataset
from ...core.dimension import dimension_name
from ...core.util import (
OrderedDict, dimension_sanitizer, isfinite
)
from ...operation import interpolate_curve
from ...util.transform import dim
from ..mixins import AreaMixin, BarsMixin, SpikesMixin
from ..util import compute_sizes, get_min_distance
from .element import ElementPlot, ColorbarPlot, LegendPlot, OverlayPlot
from .selection import BokehOverlaySelectionDisplay
from .styles import (
expand_batched_style, base_properties, line_properties, fill_properties,
mpl_to_bokeh, rgb2hex
)
from .util import bokeh_version, categorize_array
class PointPlot(LegendPlot, ColorbarPlot):
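    """
    Renders point-like elements as Bokeh scatter glyphs, with optional
    color and size mapping as well as jitter along the x-axis.
    """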
jitter = param.Number(default=None, bounds=(0, None), doc="""
The amount of jitter to apply to offset the points along the x-axis.""")
selected = param.List(default=None, doc="""
The current selection as a list of integers corresponding
to the selected items.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
size_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of size style mapping, e.g. `size=dim('size')`""")
scaling_method = param.ObjectSelector(default="area",
objects=["width", "area"],
doc="""
Deprecated in favor of size style mapping, e.g.
size=dim('size')**2.""")
scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
Scaling factor which is applied to either the width or area
of each point, depending on the value of `scaling_method`.""")
size_fn = param.Callable(default=np.abs, doc="""
Function applied to size values before applying scaling,
to remove values lower than zero.""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = (['cmap', 'palette', 'marker', 'size', 'angle', 'visible'] +
line_properties + fill_properties)
_plot_methods = dict(single='scatter', batched='scatter')
_batched_style_opts = line_properties + fill_properties + ['size', 'marker', 'angle']
def _get_size_data(self, element, ranges, style):
data, mapping = {}, {}
sdim = element.get_dimension(self.size_index)
ms = style.get('size', np.sqrt(6))
if sdim and ((isinstance(ms, str) and ms in element) or isinstance(ms, dim)):
self.param.warning(
"Cannot declare style mapping for 'size' option and "
"declare a size_index; ignoring the size_index.")
sdim = None
if not sdim or self.static_source:
return data, mapping
map_key = 'size_' + sdim.name
ms = ms**2
sizes = element.dimension_values(self.size_index)
sizes = compute_sizes(sizes, self.size_fn,
self.scaling_factor,
self.scaling_method, ms)
if sizes is None:
eltype = type(element).__name__
self.param.warning(
'%s dimension is not numeric, cannot use to scale %s size.'
% (sdim.pprint_label, eltype))
else:
data[map_key] = np.sqrt(sizes)
mapping['size'] = map_key
return data, mapping
def get_data(self, element, ranges, style):
dims = element.dimensions(label=True)
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
mapping = dict(x=dims[xidx], y=dims[yidx])
data = {}
if not self.static_source or self.batched:
xdim, ydim = dims[:2]
data[xdim] = element.dimension_values(xdim)
data[ydim] = element.dimension_values(ydim)
self._categorize_data(data, dims[:2], element.dimensions())
cdata, cmapping = self._get_color_data(element, ranges, style)
data.update(cdata)
mapping.update(cmapping)
sdata, smapping = self._get_size_data(element, ranges, style)
data.update(sdata)
mapping.update(smapping)
if 'angle' in style and isinstance(style['angle'], (int, float)):
style['angle'] = np.deg2rad(style['angle'])
if self.jitter:
if self.invert_axes:
mapping['y'] = jitter(dims[yidx], self.jitter,
range=self.handles['y_range'])
else:
mapping['x'] = jitter(dims[xidx], self.jitter,
range=self.handles['x_range'])
self._get_hover_data(data, element)
return data, mapping, style
def get_batched_data(self, element, ranges):
data = defaultdict(list)
zorders = self._updated_zorders(element)
# Angles need special handling since they are tied to the
# marker in certain cases
has_angles = False
for (key, el), zorder in zip(element.data.items(), zorders):
el_opts = self.lookup_options(el, 'plot').options
self.param.set_param(**{k: v for k, v in el_opts.items()
if k not in OverlayPlot._propagate_options})
style = self.lookup_options(element.last, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
style = mpl_to_bokeh(style)
for k, eld in eldata.items():
data[k].append(eld)
# Skip if data is empty
if not eldata:
continue
# Apply static styles
nvals = len(list(eldata.values())[0])
sdata, smapping = expand_batched_style(style, self._batched_style_opts,
elmapping, nvals)
if 'angle' in sdata and '__angle' not in data and 'marker' in data:
data['__angle'] = [np.zeros(len(d)) for d in data['marker']]
has_angles = True
elmapping.update(smapping)
for k, v in sorted(sdata.items()):
if k == 'angle':
k = '__angle'
has_angles = True
data[k].append(v)
if has_angles and 'angle' not in sdata:
data['__angle'].append(np.zeros(len(v)))
if 'hover' in self.handles:
for d, k in zip(element.dimensions(), key):
sanitized = dimension_sanitizer(d.name)
data[sanitized].append([k]*nvals)
data = {k: np.concatenate(v) for k, v in data.items()}
if '__angle' in data:
elmapping['angle'] = {'field': '__angle'}
return data, elmapping, style
class VectorFieldPlot(ColorbarPlot):
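    """
    Renders VectorField elements as segments whose angle and length are
    derived from the element values, optionally with arrow heads.
    """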
arrow_heads = param.Boolean(default=True, doc="""
Whether or not to draw arrow heads.""")
magnitude = param.ClassSelector(class_=(str, dim), doc="""
Dimension or dimension value transform that declares the magnitude
        of each vector. Magnitude is expected to be scaled between 0-1.
        By default the magnitudes are rescaled relative to the minimum
        distance between vectors; this can be disabled with the
        rescale_lengths option.""")
padding = param.ClassSelector(default=0.05, class_=(int, float, tuple))
pivot = param.ObjectSelector(default='mid', objects=['mid', 'tip', 'tail'],
doc="""
        The point around which the arrows should pivot; valid options
        include 'mid', 'tip' and 'tail'.""")
rescale_lengths = param.Boolean(default=True, doc="""
Whether the lengths will be rescaled to take into account the
smallest non-zero distance between two vectors.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of dimension value transform on color option,
e.g. `color=dim('Magnitude')`.
""")
size_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of the magnitude option, e.g.
`magnitude=dim('Magnitude')`.
""")
normalize_lengths = param.Boolean(default=True, doc="""
Deprecated in favor of rescaling length using dimension value
transforms using the magnitude option, e.g.
`dim('Magnitude').norm()`.""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = base_properties + line_properties + ['scale', 'cmap']
_nonvectorized_styles = base_properties + ['scale', 'cmap']
_plot_methods = dict(single='segment')
def _get_lengths(self, element, ranges):
size_dim = element.get_dimension(self.size_index)
mag_dim = self.magnitude
if size_dim and mag_dim:
self.param.warning(
"Cannot declare style mapping for 'magnitude' option "
"and declare a size_index; ignoring the size_index.")
elif size_dim:
mag_dim = size_dim
elif isinstance(mag_dim, str):
mag_dim = element.get_dimension(mag_dim)
(x0, x1), (y0, y1) = (element.range(i) for i in range(2))
if mag_dim:
if isinstance(mag_dim, dim):
magnitudes = mag_dim.apply(element, flat=True)
else:
magnitudes = element.dimension_values(mag_dim)
_, max_magnitude = ranges[dimension_name(mag_dim)]['combined']
if self.normalize_lengths and max_magnitude != 0:
magnitudes = magnitudes / max_magnitude
if self.rescale_lengths:
base_dist = get_min_distance(element)
magnitudes *= base_dist
else:
magnitudes = np.ones(len(element))
if self.rescale_lengths:
base_dist = get_min_distance(element)
magnitudes *= base_dist
return magnitudes
def _glyph_properties(self, *args):
properties = super()._glyph_properties(*args)
properties.pop('scale', None)
return properties
def get_data(self, element, ranges, style):
input_scale = style.pop('scale', 1.0)
# Get x, y, angle, magnitude and color data
rads = element.dimension_values(2)
if self.invert_axes:
xidx, yidx = (1, 0)
rads = np.pi/2 - rads
else:
xidx, yidx = (0, 1)
lens = self._get_lengths(element, ranges)/input_scale
cdim = element.get_dimension(self.color_index)
cdata, cmapping = self._get_color_data(element, ranges, style,
name='line_color')
# Compute segments and arrowheads
xs = element.dimension_values(xidx)
ys = element.dimension_values(yidx)
# Compute offset depending on pivot option
xoffsets = np.cos(rads)*lens/2.
yoffsets = np.sin(rads)*lens/2.
if self.pivot == 'mid':
nxoff, pxoff = xoffsets, xoffsets
nyoff, pyoff = yoffsets, yoffsets
elif self.pivot == 'tip':
nxoff, pxoff = 0, xoffsets*2
nyoff, pyoff = 0, yoffsets*2
elif self.pivot == 'tail':
nxoff, pxoff = xoffsets*2, 0
nyoff, pyoff = yoffsets*2, 0
x0s, x1s = (xs + nxoff, xs - pxoff)
y0s, y1s = (ys + nyoff, ys - pyoff)
color = None
if self.arrow_heads:
arrow_len = (lens/4.)
xa1s = x0s - np.cos(rads+np.pi/4)*arrow_len
ya1s = y0s - np.sin(rads+np.pi/4)*arrow_len
xa2s = x0s - np.cos(rads-np.pi/4)*arrow_len
ya2s = y0s - np.sin(rads-np.pi/4)*arrow_len
x0s = np.tile(x0s, 3)
x1s = np.concatenate([x1s, xa1s, xa2s])
y0s = np.tile(y0s, 3)
y1s = np.concatenate([y1s, ya1s, ya2s])
if cdim and cdim.name in cdata:
color = np.tile(cdata[cdim.name], 3)
elif cdim:
color = cdata.get(cdim.name)
data = {'x0': x0s, 'x1': x1s, 'y0': y0s, 'y1': y1s}
mapping = dict(x0='x0', x1='x1', y0='y0', y1='y1')
if cdim and color is not None:
data[cdim.name] = color
mapping.update(cmapping)
return (data, mapping, style)
class CurvePlot(ElementPlot):
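    """
    Renders Curve elements as Bokeh line glyphs, optionally interpolating
    the samples into step curves.
    """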
padding = param.ClassSelector(default=(0, 0.1), class_=(int, float, tuple))
interpolation = param.ObjectSelector(objects=['linear', 'steps-mid',
'steps-pre', 'steps-post'],
default='linear', doc="""
Defines how the samples of the Curve are interpolated,
default is 'linear', other options include 'steps-mid',
'steps-pre' and 'steps-post'.""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = base_properties + line_properties
_batched_style_opts = line_properties
_nonvectorized_styles = base_properties + line_properties
_plot_methods = dict(single='line', batched='multi_line')
def get_data(self, element, ranges, style):
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
x = element.get_dimension(xidx).name
y = element.get_dimension(yidx).name
if self.static_source and not self.batched:
return {}, dict(x=x, y=y), style
if 'steps' in self.interpolation:
element = interpolate_curve(element, interpolation=self.interpolation)
data = {x: element.dimension_values(xidx),
y: element.dimension_values(yidx)}
self._get_hover_data(data, element)
self._categorize_data(data, (x, y), element.dimensions())
return (data, dict(x=x, y=y), style)
def _hover_opts(self, element):
if self.batched:
dims = list(self.hmap.last.kdims)
line_policy = 'prev'
else:
dims = list(self.overlay_dims.keys())+element.dimensions()
line_policy = 'nearest'
return dims, dict(line_policy=line_policy)
def get_batched_data(self, overlay, ranges):
data = defaultdict(list)
zorders = self._updated_zorders(overlay)
for (key, el), zorder in zip(overlay.data.items(), zorders):
el_opts = self.lookup_options(el, 'plot').options
self.param.set_param(**{k: v for k, v in el_opts.items()
if k not in OverlayPlot._propagate_options})
style = self.lookup_options(el, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
# Skip if data empty
if not eldata:
continue
for k, eld in eldata.items():
data[k].append(eld)
# Apply static styles
sdata, smapping = expand_batched_style(style, self._batched_style_opts,
elmapping, nvals=1)
elmapping.update(smapping)
for k, v in sdata.items():
data[k].append(v[0])
for d, k in zip(overlay.kdims, key):
sanitized = dimension_sanitizer(d.name)
data[sanitized].append(k)
data = {opt: vals for opt, vals in data.items()
if not any(v is None for v in vals)}
mapping = {{'x': 'xs', 'y': 'ys'}.get(k, k): v
for k, v in elmapping.items()}
return data, mapping, style
class HistogramPlot(ColorbarPlot):
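    """
    Renders Histogram elements as Bokeh quad glyphs spanning the bin edges.
    """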
selection_display = BokehOverlaySelectionDisplay(color_prop=['color', 'fill_color'])
style_opts = base_properties + fill_properties + line_properties + ['cmap']
_nonvectorized_styles = base_properties + ['line_dash']
_plot_methods = dict(single='quad')
def get_data(self, element, ranges, style):
if self.invert_axes:
mapping = dict(top='right', bottom='left', left=0, right='top')
else:
mapping = dict(top='top', bottom=0, left='left', right='right')
if self.static_source:
data = dict(top=[], left=[], right=[])
else:
x = element.kdims[0]
values = element.dimension_values(1)
edges = element.interface.coords(element, x, edges=True)
if hasattr(edges, 'compute'):
edges = edges.compute()
data = dict(top=values, left=edges[:-1], right=edges[1:])
self._get_hover_data(data, element)
return (data, mapping, style)
def get_extents(self, element, ranges, range_type='combined'):
ydim = element.get_dimension(1)
s0, s1 = ranges[ydim.name]['soft']
s0 = min(s0, 0) if isfinite(s0) else 0
s1 = max(s1, 0) if isfinite(s1) else 0
ranges[ydim.name]['soft'] = (s0, s1)
return super().get_extents(element, ranges, range_type)
class SideHistogramPlot(HistogramPlot):
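    """
    HistogramPlot variant with compact defaults for adjoined plots, linking
    a box-select tool to the main plot's colormapper.
    """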
style_opts = HistogramPlot.style_opts + ['cmap']
height = param.Integer(default=125, doc="The height of the plot")
width = param.Integer(default=125, doc="The width of the plot")
show_title = param.Boolean(default=False, doc="""
Whether to display the plot title.""")
default_tools = param.List(default=['save', 'pan', 'wheel_zoom',
'box_zoom', 'reset'],
doc="A list of plugin tools to use on the plot.")
_callback = """
color_mapper.low = cb_obj['geometry']['{axis}0'];
color_mapper.high = cb_obj['geometry']['{axis}1'];
source.change.emit()
main_source.change.emit()
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.invert_axes:
self.default_tools.append('ybox_select')
else:
self.default_tools.append('xbox_select')
def get_data(self, element, ranges, style):
data, mapping, style = HistogramPlot.get_data(self, element, ranges, style)
color_dims = [d for d in self.adjoined.traverse(lambda x: x.handles.get('color_dim'))
if d is not None]
dimension = color_dims[0] if color_dims else None
cmapper = self._get_colormapper(dimension, element, {}, {})
if cmapper:
cvals = None
if isinstance(dimension, dim):
if dimension.applies(element):
dim_name = dimension.dimension.name
cvals = [] if self.static_source else dimension.apply(element)
else:
if dimension in element.dimensions():
dim_name = dimension.name
cvals = [] if self.static_source else element.dimension_values(dimension)
if cvals is not None:
data[dim_name] = cvals
mapping['fill_color'] = {'field': dim_name,
'transform': cmapper}
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
ret = super()._init_glyph(plot, mapping, properties)
if not 'field' in mapping.get('fill_color', {}):
return ret
dim = mapping['fill_color']['field']
sources = self.adjoined.traverse(lambda x: (x.handles.get('color_dim'),
x.handles.get('source')))
sources = [src for cdim, src in sources if cdim == dim]
tools = [t for t in self.handles['plot'].tools
if isinstance(t, BoxSelectTool)]
if not tools or not sources:
return
main_source = sources[0]
handles = {'color_mapper': self.handles['color_mapper'],
'source': self.handles['source'],
'cds': self.handles['source'],
'main_source': main_source}
callback = self._callback.format(axis='y' if self.invert_axes else 'x')
self.state.js_on_event("selectiongeometry", CustomJS(args=handles, code=callback))
return ret
class ErrorPlot(ColorbarPlot):
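    """
    Renders error bar elements as a Bokeh Whisker annotation spanning the
    lower and upper error bounds.
    """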
selected = param.List(default=None, doc="""
The current selection as a list of integers corresponding
to the selected items.""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = ([
p for p in line_properties if p.split('_')[0] not in
('hover', 'selection', 'nonselection', 'muted')
] + ['lower_head', 'upper_head'] + base_properties)
_nonvectorized_styles = base_properties + ['line_dash']
_mapping = dict(base="base", upper="upper", lower="lower")
_plot_methods = dict(single=Whisker)
def get_data(self, element, ranges, style):
mapping = dict(self._mapping)
if self.static_source:
return {}, mapping, style
x_idx, y_idx = (1, 0) if element.horizontal else (0, 1)
base = element.dimension_values(x_idx)
mean = element.dimension_values(y_idx)
neg_error = element.dimension_values(2)
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
lower = mean - neg_error
upper = mean + pos_error
if element.horizontal ^ self.invert_axes:
mapping['dimension'] = 'width'
else:
mapping['dimension'] = 'height'
data = dict(base=base, lower=lower, upper=upper)
self._categorize_data(data, ('base',), element.dimensions())
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
properties = {k: v for k, v in properties.items() if 'legend' not in k}
for prop in ['color', 'alpha']:
if prop not in properties:
continue
pval = properties.pop(prop)
line_prop = 'line_%s' % prop
fill_prop = 'fill_%s' % prop
if line_prop not in properties:
properties[line_prop] = pval
if fill_prop not in properties and fill_prop in self.style_opts:
properties[fill_prop] = pval
properties = mpl_to_bokeh(properties)
plot_method = self._plot_methods['single']
glyph = plot_method(**dict(properties, **mapping))
plot.add_layout(glyph)
return None, glyph
class SpreadPlot(ElementPlot):
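    """
    Renders Spread elements as filled patches between the lower and upper
    error bounds, split at NaN values.
    """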
padding = param.ClassSelector(default=(0, 0.1), class_=(int, float, tuple))
selection_display = BokehOverlaySelectionDisplay()
style_opts = base_properties + fill_properties + line_properties
_no_op_style = style_opts
_nonvectorized_styles = style_opts
_plot_methods = dict(single='patch')
_stream_data = False # Plot does not support streaming data
def _split_area(self, xs, lower, upper):
"""
Splits area plots at nans and returns x- and y-coordinates for
each area separated by nans.
"""
xnan = np.array([np.datetime64('nat') if xs.dtype.kind == 'M' else np.nan])
ynan = np.array([np.datetime64('nat') if lower.dtype.kind == 'M' else np.nan])
split = np.where(~isfinite(xs) | ~isfinite(lower) | ~isfinite(upper))[0]
xvals = np.split(xs, split)
lower = np.split(lower, split)
upper = np.split(upper, split)
band_x, band_y = [], []
for i, (x, l, u) in enumerate(zip(xvals, lower, upper)):
if i:
x, l, u = x[1:], l[1:], u[1:]
if not len(x):
continue
band_x += [np.append(x, x[::-1]), xnan]
band_y += [np.append(l, u[::-1]), ynan]
if len(band_x):
xs = np.concatenate(band_x[:-1])
ys = np.concatenate(band_y[:-1])
return xs, ys
return [], []
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y')
xvals = element.dimension_values(0)
mean = element.dimension_values(1)
neg_error = element.dimension_values(2)
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
lower = mean - neg_error
upper = mean + pos_error
band_x, band_y = self._split_area(xvals, lower, upper)
if self.invert_axes:
data = dict(x=band_y, y=band_x)
else:
data = dict(x=band_x, y=band_y)
return data, mapping, style
class AreaPlot(AreaMixin, SpreadPlot):
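    """
    Renders Area elements as filled patches between a baseline (or a second
    value dimension) and the curve values.
    """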
padding = param.ClassSelector(default=(0, 0.1), class_=(int, float, tuple))
selection_display = BokehOverlaySelectionDisplay()
_stream_data = False # Plot does not support streaming data
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y')
xs = element.dimension_values(0)
if len(element.vdims) > 1:
bottom = element.dimension_values(2)
else:
bottom = np.zeros(len(element))
top = element.dimension_values(1)
band_xs, band_ys = self._split_area(xs, bottom, top)
if self.invert_axes:
data = dict(x=band_ys, y=band_xs)
else:
data = dict(x=band_xs, y=band_ys)
return data, mapping, style
class SpikesPlot(SpikesMixin, ColorbarPlot):
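    """
    Renders Spikes elements as segment glyphs of fixed or value-dependent
    length anchored at a configurable position.
    """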
spike_length = param.Number(default=0.5, doc="""
The length of each spike if Spikes object is one dimensional.""")
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = base_properties + line_properties + ['cmap', 'palette']
_nonvectorized_styles = base_properties + ['cmap']
_plot_methods = dict(single='segment')
def _get_axis_dims(self, element):
if 'spike_length' in self.lookup_options(element, 'plot').options:
return [element.dimensions()[0], None, None]
return super()._get_axis_dims(element)
def get_data(self, element, ranges, style):
dims = element.dimensions()
data = {}
pos = self.position
opts = self.lookup_options(element, 'plot').options
if len(element) == 0 or self.static_source:
data = {'x': [], 'y0': [], 'y1': []}
else:
data['x'] = element.dimension_values(0)
data['y0'] = np.full(len(element), pos)
if len(dims) > 1 and 'spike_length' not in opts:
data['y1'] = element.dimension_values(1)+pos
else:
data['y1'] = data['y0']+self.spike_length
if self.invert_axes:
mapping = {'x0': 'y0', 'x1': 'y1', 'y0': 'x', 'y1': 'x'}
else:
mapping = {'x0': 'x', 'x1': 'x', 'y0': 'y0', 'y1': 'y1'}
cdata, cmapping = self._get_color_data(element, ranges, dict(style))
data.update(cdata)
mapping.update(cmapping)
self._get_hover_data(data, element)
return data, mapping, style
class SideSpikesPlot(SpikesPlot):
"""
SpikesPlot with useful defaults for plotting adjoined rug plot.
"""
selected = param.List(default=None, doc="""
The current selection as a list of integers corresponding
to the selected items.""")
xaxis = param.ObjectSelector(default='top-bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='right-bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
        'right', 'bare', 'left-bare' and 'right-bare'.""")
border = param.Integer(default=5, doc="Default borders on plot")
height = param.Integer(default=50, doc="Height of plot")
width = param.Integer(default=50, doc="Width of plot")
class BarPlot(BarsMixin, ColorbarPlot, LegendPlot):
"""
BarPlot allows generating single- or multi-category
bar Charts, by selecting which key dimensions are
mapped onto separate groups, categories and stacks.
"""
multi_level = param.Boolean(default=True, doc="""
Whether the Bars should be grouped into a second categorical axis level.""")
stacked = param.Boolean(default=False, doc="""
Whether the bars should be stacked or grouped.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = (base_properties + fill_properties + line_properties +
['bar_width', 'cmap'])
_nonvectorized_styles = base_properties + ['bar_width', 'cmap']
_plot_methods = dict(single=('vbar', 'hbar'))
# Declare that y-range should auto-range if not bounded
_x_range_type = FactorRange
_y_range_type = Range1d
def _axis_properties(self, axis, key, plot, dimension=None,
ax_mapping={'x': 0, 'y': 1}):
props = super()._axis_properties(axis, key, plot, dimension, ax_mapping)
if (not self.multi_level and not self.stacked and self.current_frame.ndims > 1 and
((not self.invert_axes and axis == 'x') or (self.invert_axes and axis =='y'))):
props['separator_line_width'] = 0
props['major_tick_line_alpha'] = 0
props['major_label_text_font_size'] = '0pt'
props['group_text_color'] = 'black'
props['group_text_font_style'] = "normal"
if axis == 'x':
props['group_text_align'] = "center"
if 'major_label_orientation' in props:
props['group_label_orientation'] = props.pop('major_label_orientation')
elif axis == 'y':
props['group_label_orientation'] = 0
props['group_text_align'] = 'right'
props['group_text_baseline'] = 'middle'
return props
def _get_axis_dims(self, element):
if element.ndims > 1 and not (self.stacked or not self.multi_level):
xdims = element.kdims
else:
xdims = element.kdims[0]
return (xdims, element.vdims[0])
def _get_factors(self, element, ranges):
xvals, gvals = self._get_coords(element, ranges)
if gvals is not None:
xvals = [(x, g) for x in xvals for g in gvals]
return ([], xvals) if self.invert_axes else (xvals, [])
def get_stack(self, xvals, yvals, baselines, sign='positive'):
"""
        Iterates over the x- and y-values in a stack layer
and appropriately offsets the layer on top of the
previous layer.
"""
bottoms, tops = [], []
for x, y in zip(xvals, yvals):
baseline = baselines[x][sign]
if sign == 'positive':
bottom = baseline
top = bottom+y
baseline = top
else:
top = baseline
bottom = top+y
baseline = bottom
baselines[x][sign] = baseline
bottoms.append(bottom)
tops.append(top)
return bottoms, tops
def _glyph_properties(self, *args, **kwargs):
props = super()._glyph_properties(*args, **kwargs)
return {k: v for k, v in props.items() if k not in ['width', 'bar_width']}
def _add_color_data(self, ds, ranges, style, cdim, data, mapping, factors, colors):
cdata, cmapping = self._get_color_data(ds, ranges, dict(style),
factors=factors, colors=colors)
if 'color' not in cmapping:
return
# Enable legend if colormapper is categorical
cmapper = cmapping['color']['transform']
legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend'
if ('color' in cmapping and self.show_legend and
isinstance(cmapper, CategoricalColorMapper)):
mapping[legend_prop] = cdim.name
if not self.stacked and ds.ndims > 1 and self.multi_level:
cmapping.pop(legend_prop, None)
mapping.pop(legend_prop, None)
# Merge data and mappings
mapping.update(cmapping)
for k, cd in cdata.items():
if isinstance(cmapper, CategoricalColorMapper) and cd.dtype.kind in 'uif':
cd = categorize_array(cd, cdim)
if k not in data or len(data[k]) != [len(data[key]) for key in data if key != k][0]:
data[k].append(cd)
else:
data[k][-1] = cd
def get_data(self, element, ranges, style):
# Get x, y, group, stack and color dimensions
group_dim, stack_dim = None, None
if element.ndims == 1:
grouping = None
elif self.stacked:
grouping = 'stacked'
stack_dim = element.get_dimension(1)
if stack_dim.values:
stack_order = stack_dim.values
elif stack_dim in ranges and ranges[stack_dim.name].get('factors'):
stack_order = ranges[stack_dim.name]['factors']
else:
stack_order = element.dimension_values(1, False)
stack_order = list(stack_order)
else:
grouping = 'grouped'
group_dim = element.get_dimension(1)
xdim = element.get_dimension(0)
ydim = element.vdims[0]
no_cidx = self.color_index is None
color_index = (group_dim or stack_dim) if no_cidx else self.color_index
color_dim = element.get_dimension(color_index)
if color_dim:
self.color_index = color_dim.name
# Define style information
width = style.get('bar_width', style.get('width', 1))
if 'width' in style:
self.param.warning("BarPlot width option is deprecated "
"use 'bar_width' instead.")
cmap = style.get('cmap')
hover = 'hover' in self.handles
# Group by stack or group dim if necessary
if group_dim is None:
grouped = {0: element}
else:
grouped = element.groupby(group_dim, group_type=Dataset,
container_type=OrderedDict,
datatype=['dataframe', 'dictionary'])
y0, y1 = ranges.get(ydim.name, {'combined': (None, None)})['combined']
if self.logy:
bottom = (ydim.range[0] or (10**(np.log10(y1)-2)) if y1 else 0.01)
else:
bottom = 0
# Map attributes to data
if grouping == 'stacked':
mapping = {'x': xdim.name, 'top': 'top',
'bottom': 'bottom', 'width': width}
elif grouping == 'grouped':
mapping = {'x': 'xoffsets', 'top': ydim.name, 'bottom': bottom,
'width': width}
else:
mapping = {'x': xdim.name, 'top': ydim.name, 'bottom': bottom, 'width': width}
# Get colors
cdim = color_dim or group_dim
style_mapping = [v for k, v in style.items() if 'color' in k and
(isinstance(v, dim) or v in element)]
if style_mapping and not no_cidx and self.color_index is not None:
self.param.warning("Cannot declare style mapping for '%s' option "
"and declare a color_index; ignoring the color_index."
% style_mapping[0])
cdim = None
cvals = element.dimension_values(cdim, expanded=False) if cdim else None
if cvals is not None:
if cvals.dtype.kind in 'uif' and no_cidx:
cvals = categorize_array(cvals, color_dim)
factors = None if cvals.dtype.kind in 'uif' else list(cvals)
if cdim is xdim and factors:
factors = list(categorize_array(factors, xdim))
if cmap is None and factors:
styles = self.style.max_cycles(len(factors))
colors = [styles[i]['color'] for i in range(len(factors))]
colors = [rgb2hex(c) if isinstance(c, tuple) else c for c in colors]
else:
colors = None
else:
factors, colors = None, None
# Iterate over stacks and groups and accumulate data
data = defaultdict(list)
baselines = defaultdict(lambda: {'positive': bottom, 'negative': 0})
for i, (k, ds) in enumerate(grouped.items()):
k = k[0] if isinstance(k, tuple) else k
if group_dim:
gval = k if isinstance(k, str) else group_dim.pprint_value(k)
# Apply stacking or grouping
if grouping == 'stacked':
for sign, slc in [('negative', (None, 0)), ('positive', (0, None))]:
slc_ds = ds.select(**{ds.vdims[0].name: slc})
stack_inds = [stack_order.index(v) if v in stack_order else -1
for v in slc_ds[stack_dim.name]]
slc_ds = slc_ds.add_dimension('_stack_order', 0, stack_inds).sort('_stack_order')
xs = slc_ds.dimension_values(xdim)
ys = slc_ds.dimension_values(ydim)
bs, ts = self.get_stack(xs, ys, baselines, sign)
data['bottom'].append(bs)
data['top'].append(ts)
data[xdim.name].append(xs)
data[stack_dim.name].append(slc_ds.dimension_values(stack_dim))
if hover:
data[ydim.name].append(ys)
for vd in slc_ds.vdims[1:]:
data[vd.name].append(slc_ds.dimension_values(vd))
if not style_mapping:
self._add_color_data(slc_ds, ranges, style, cdim, data,
mapping, factors, colors)
elif grouping == 'grouped':
xs = ds.dimension_values(xdim)
ys = ds.dimension_values(ydim)
xoffsets = [(x if xs.dtype.kind in 'SU' else xdim.pprint_value(x), gval)
for x in xs]
data['xoffsets'].append(xoffsets)
data[ydim.name].append(ys)
if hover: data[xdim.name].append(xs)
if group_dim not in ds.dimensions():
ds = ds.add_dimension(group_dim, ds.ndims, gval)
data[group_dim.name].append(ds.dimension_values(group_dim))
else:
data[xdim.name].append(ds.dimension_values(xdim))
data[ydim.name].append(ds.dimension_values(ydim))
if hover and grouping != 'stacked':
for vd in ds.vdims[1:]:
data[vd.name].append(ds.dimension_values(vd))
if grouping != 'stacked' and not style_mapping:
self._add_color_data(ds, ranges, style, cdim, data,
mapping, factors, colors)
# Concatenate the stacks or groups
sanitized_data = {}
for col, vals in data.items():
if len(vals) == 1:
sanitized_data[dimension_sanitizer(col)] = vals[0]
elif vals:
sanitized_data[dimension_sanitizer(col)] = np.concatenate(vals)
for name, val in mapping.items():
sanitized = None
if isinstance(val, str):
sanitized = dimension_sanitizer(mapping[name])
mapping[name] = sanitized
elif isinstance(val, dict) and 'field' in val:
sanitized = dimension_sanitizer(val['field'])
val['field'] = sanitized
if sanitized is not None and sanitized not in sanitized_data:
sanitized_data[sanitized] = []
# Ensure x-values are categorical
xname = dimension_sanitizer(xdim.name)
if xname in sanitized_data:
sanitized_data[xname] = categorize_array(sanitized_data[xname], xdim)
# If axes inverted change mapping to match hbar signature
if self.invert_axes:
mapping.update({'y': mapping.pop('x'), 'left': mapping.pop('bottom'),
'right': mapping.pop('top'), 'height': mapping.pop('width')})
return sanitized_data, mapping, style
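if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of this module): holoviews
    # dispatches Bars elements to this BarPlot on the bokeh backend, and the
    # stacked/multi_level options above select the get_data() code paths.
    import holoviews as hv
    hv.extension('bokeh')
    data = [('Q1', 'A', 3), ('Q1', 'B', 5), ('Q2', 'A', 2), ('Q2', 'B', 4)]
    bars = hv.Bars(data, kdims=['Quarter', 'Company'], vdims=['Revenue'])
    grouped = bars.opts(stacked=False, clone=True)  # side-by-side groups
    stacked = bars.opts(stacked=True, clone=True)   # stacked bars
    hv.save(grouped + stacked, 'bars_demo.html')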
|
|
"""
Support for MQTT discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/#discovery
"""
import asyncio
import json
import logging
import re
from homeassistant.components import mqtt
from homeassistant.components.mqtt import ATTR_DISCOVERY_HASH, CONF_STATE_TOPIC
from homeassistant.const import CONF_DEVICE, CONF_PLATFORM
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
TOPIC_MATCHER = re.compile(
r'(?P<prefix_topic>\w+)/(?P<component>\w+)/'
r'(?:(?P<node_id>[a-zA-Z0-9_-]+)/)?(?P<object_id>[a-zA-Z0-9_-]+)/config')
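# e.g. 'homeassistant/sensor/bedroom/temperature/config' matches with
# component='sensor', node_id='bedroom' and object_id='temperature';
# the node_id segment is optional.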
SUPPORTED_COMPONENTS = [
'binary_sensor', 'camera', 'cover', 'fan',
'light', 'sensor', 'switch', 'lock', 'climate',
'alarm_control_panel', 'vacuum']
CONFIG_ENTRY_COMPONENTS = [
'binary_sensor',
'camera',
'cover',
'light',
'lock',
'sensor',
'switch',
'climate',
'alarm_control_panel',
'fan',
'vacuum',
]
DEPRECATED_PLATFORM_TO_SCHEMA = {
'light': {
'mqtt_json': 'json',
'mqtt_template': 'template',
}
}
ALREADY_DISCOVERED = 'mqtt_discovered_components'
DATA_CONFIG_ENTRY_LOCK = 'mqtt_config_entry_lock'
CONFIG_ENTRY_IS_SETUP = 'mqtt_config_entry_is_setup'
MQTT_DISCOVERY_UPDATED = 'mqtt_discovery_updated_{}'
MQTT_DISCOVERY_NEW = 'mqtt_discovery_new_{}_{}'
TOPIC_BASE = '~'
ABBREVIATIONS = {
'aux_cmd_t': 'aux_command_topic',
'aux_stat_tpl': 'aux_state_template',
'aux_stat_t': 'aux_state_topic',
'avty_t': 'availability_topic',
'away_mode_cmd_t': 'away_mode_command_topic',
'away_mode_stat_tpl': 'away_mode_state_template',
'away_mode_stat_t': 'away_mode_state_topic',
'bri_cmd_t': 'brightness_command_topic',
'bri_scl': 'brightness_scale',
'bri_stat_t': 'brightness_state_topic',
'bri_val_tpl': 'brightness_value_template',
'clr_temp_cmd_tpl': 'color_temp_command_template',
'bat_lev_t': 'battery_level_topic',
'bat_lev_tpl': 'battery_level_template',
'chrg_t': 'charging_topic',
'chrg_tpl': 'charging_template',
'clr_temp_cmd_t': 'color_temp_command_topic',
'clr_temp_stat_t': 'color_temp_state_topic',
'clr_temp_val_tpl': 'color_temp_value_template',
'cln_t': 'cleaning_topic',
'cln_tpl': 'cleaning_template',
'cmd_t': 'command_topic',
'curr_temp_t': 'current_temperature_topic',
'dev': 'device',
'dev_cla': 'device_class',
'dock_t': 'docked_topic',
'dock_tpl': 'docked_template',
'err_t': 'error_topic',
'err_tpl': 'error_template',
'fanspd_t': 'fan_speed_topic',
'fanspd_tpl': 'fan_speed_template',
'fanspd_lst': 'fan_speed_list',
'fx_cmd_t': 'effect_command_topic',
'fx_list': 'effect_list',
'fx_stat_t': 'effect_state_topic',
'fx_val_tpl': 'effect_value_template',
'exp_aft': 'expire_after',
'fan_mode_cmd_t': 'fan_mode_command_topic',
'fan_mode_stat_tpl': 'fan_mode_state_template',
'fan_mode_stat_t': 'fan_mode_state_topic',
'frc_upd': 'force_update',
'hold_cmd_t': 'hold_command_topic',
'hold_stat_tpl': 'hold_state_template',
'hold_stat_t': 'hold_state_topic',
'ic': 'icon',
'init': 'initial',
'json_attr': 'json_attributes',
'json_attr_t': 'json_attributes_topic',
'max_temp': 'max_temp',
'min_temp': 'min_temp',
'mode_cmd_t': 'mode_command_topic',
'mode_stat_tpl': 'mode_state_template',
'mode_stat_t': 'mode_state_topic',
'name': 'name',
'on_cmd_type': 'on_command_type',
'opt': 'optimistic',
'osc_cmd_t': 'oscillation_command_topic',
'osc_stat_t': 'oscillation_state_topic',
'osc_val_tpl': 'oscillation_value_template',
'pl_arm_away': 'payload_arm_away',
'pl_arm_home': 'payload_arm_home',
'pl_avail': 'payload_available',
'pl_cls': 'payload_close',
'pl_disarm': 'payload_disarm',
'pl_hi_spd': 'payload_high_speed',
'pl_lock': 'payload_lock',
'pl_lo_spd': 'payload_low_speed',
'pl_med_spd': 'payload_medium_speed',
'pl_not_avail': 'payload_not_available',
'pl_off': 'payload_off',
'pl_on': 'payload_on',
'pl_open': 'payload_open',
'pl_osc_off': 'payload_oscillation_off',
'pl_osc_on': 'payload_oscillation_on',
'pl_stop': 'payload_stop',
'pl_unlk': 'payload_unlock',
'pow_cmd_t': 'power_command_topic',
'ret': 'retain',
'rgb_cmd_tpl': 'rgb_command_template',
'rgb_cmd_t': 'rgb_command_topic',
'rgb_stat_t': 'rgb_state_topic',
'rgb_val_tpl': 'rgb_value_template',
'send_cmd_t': 'send_command_topic',
'send_if_off': 'send_if_off',
'set_pos_tpl': 'set_position_template',
'set_pos_t': 'set_position_topic',
'spd_cmd_t': 'speed_command_topic',
'spd_stat_t': 'speed_state_topic',
'spd_val_tpl': 'speed_value_template',
'spds': 'speeds',
'stat_clsd': 'state_closed',
'stat_off': 'state_off',
'stat_on': 'state_on',
'stat_open': 'state_open',
'stat_t': 'state_topic',
'stat_val_tpl': 'state_value_template',
'sup_feat': 'supported_features',
'swing_mode_cmd_t': 'swing_mode_command_topic',
'swing_mode_stat_tpl': 'swing_mode_state_template',
'swing_mode_stat_t': 'swing_mode_state_topic',
'temp_cmd_t': 'temperature_command_topic',
'temp_stat_tpl': 'temperature_state_template',
'temp_stat_t': 'temperature_state_topic',
'tilt_clsd_val': 'tilt_closed_value',
'tilt_cmd_t': 'tilt_command_topic',
'tilt_inv_stat': 'tilt_invert_state',
'tilt_max': 'tilt_max',
'tilt_min': 'tilt_min',
'tilt_opnd_val': 'tilt_opened_value',
'tilt_status_opt': 'tilt_status_optimistic',
'tilt_status_t': 'tilt_status_topic',
't': 'topic',
'uniq_id': 'unique_id',
'unit_of_meas': 'unit_of_measurement',
'val_tpl': 'value_template',
'whit_val_cmd_t': 'white_value_command_topic',
'whit_val_scl': 'white_value_scale',
'whit_val_stat_t': 'white_value_state_topic',
'whit_val_tpl': 'white_value_template',
'xy_cmd_t': 'xy_command_topic',
'xy_stat_t': 'xy_state_topic',
'xy_val_tpl': 'xy_value_template',
}
DEVICE_ABBREVIATIONS = {
'cns': 'connections',
'ids': 'identifiers',
'name': 'name',
'mf': 'manufacturer',
'mdl': 'model',
'sw': 'sw_version',
}
def clear_discovery_hash(hass, discovery_hash):
"""Clear entry in ALREADY_DISCOVERED list."""
del hass.data[ALREADY_DISCOVERED][discovery_hash]
async def async_start(hass: HomeAssistantType, discovery_topic, hass_config,
config_entry=None) -> bool:
"""Initialize of MQTT Discovery."""
async def async_device_message_received(topic, payload, qos):
"""Process the received message."""
match = TOPIC_MATCHER.match(topic)
if not match:
return
_prefix_topic, component, node_id, object_id = match.groups()
if component not in SUPPORTED_COMPONENTS:
_LOGGER.warning("Component %s is not supported", component)
return
if payload:
try:
payload = json.loads(payload)
except ValueError:
_LOGGER.warning("Unable to parse JSON %s: '%s'",
object_id, payload)
return
payload = dict(payload)
for key in list(payload.keys()):
abbreviated_key = key
key = ABBREVIATIONS.get(key, key)
payload[key] = payload.pop(abbreviated_key)
if CONF_DEVICE in payload:
device = payload[CONF_DEVICE]
for key in list(device.keys()):
abbreviated_key = key
key = DEVICE_ABBREVIATIONS.get(key, key)
device[key] = device.pop(abbreviated_key)
if TOPIC_BASE in payload:
base = payload.pop(TOPIC_BASE)
for key, value in payload.items():
if isinstance(value, str) and value:
if value[0] == TOPIC_BASE and key.endswith('_topic'):
payload[key] = "{}{}".format(base, value[1:])
if value[-1] == TOPIC_BASE and key.endswith('_topic'):
payload[key] = "{}{}".format(value[:-1], base)
# If present, the node_id will be included in the discovered object id
discovery_id = ' '.join((node_id, object_id)) if node_id else object_id
discovery_hash = (component, discovery_id)
if payload:
if CONF_PLATFORM in payload and 'schema' not in payload:
platform = payload[CONF_PLATFORM]
if (component in DEPRECATED_PLATFORM_TO_SCHEMA and
platform in DEPRECATED_PLATFORM_TO_SCHEMA[component]):
schema = DEPRECATED_PLATFORM_TO_SCHEMA[component][platform]
payload['schema'] = schema
_LOGGER.warning('"platform": "%s" is deprecated, '
'replace with "schema":"%s"',
platform, schema)
payload[CONF_PLATFORM] = 'mqtt'
if CONF_STATE_TOPIC not in payload:
payload[CONF_STATE_TOPIC] = '{}/{}/{}{}/state'.format(
discovery_topic, component,
'%s/' % node_id if node_id else '', object_id)
payload[ATTR_DISCOVERY_HASH] = discovery_hash
if ALREADY_DISCOVERED not in hass.data:
hass.data[ALREADY_DISCOVERED] = {}
if discovery_hash in hass.data[ALREADY_DISCOVERED]:
# Dispatch update
_LOGGER.info(
"Component has already been discovered: %s %s, sending update",
component, discovery_id)
async_dispatcher_send(
hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload)
elif payload:
# Add component
_LOGGER.info("Found new component: %s %s", component, discovery_id)
hass.data[ALREADY_DISCOVERED][discovery_hash] = None
if component not in CONFIG_ENTRY_COMPONENTS:
await async_load_platform(
hass, component, 'mqtt', payload, hass_config)
return
config_entries_key = '{}.{}'.format(component, 'mqtt')
async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
await hass.config_entries.async_forward_entry_setup(
config_entry, component)
hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
async_dispatcher_send(hass, MQTT_DISCOVERY_NEW.format(
component, 'mqtt'), payload)
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
await mqtt.async_subscribe(
hass, discovery_topic + '/#', async_device_message_received, 0)
return True
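if __name__ == '__main__':
    # Standalone sketch (not executed by Home Assistant itself) showing how an
    # abbreviated discovery payload expands via the tables defined above.
    example = {'name': 'Bedroom temp', '~': 'bedroom/temp',
               'stat_t': '~/state', 'unit_of_meas': '°C',
               'dev_cla': 'temperature'}
    base = example.pop(TOPIC_BASE)
    expanded = {ABBREVIATIONS.get(key, key): value
                for key, value in example.items()}
    for key, value in list(expanded.items()):
        if isinstance(value, str) and value and key.endswith('_topic'):
            if value[0] == TOPIC_BASE:
                expanded[key] = base + value[1:]
            elif value[-1] == TOPIC_BASE:
                expanded[key] = value[:-1] + base
    print(expanded)  # state_topic -> 'bedroom/temp/state', etc.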
|
|
from __future__ import print_function
__author__ = "shekkizh"
import tensorflow as tf
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth=True
import numpy as np
import os, sys, inspect
import time
utils_folder = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_folder not in sys.path:
sys.path.insert(0, utils_folder)
import utils as utils
import Dataset_Reader.read_celebADataset as celebA
from six.moves import xrange
class GAN(object):
def __init__(self, z_dim, crop_image_size, resized_image_size, batch_size, data_dir):
local_dataset = celebA.read_local_data(data_dir)
self.z_dim = z_dim
self.crop_image_size = crop_image_size
self.resized_image_size = resized_image_size
self.batch_size = batch_size
filename_queue = tf.train.string_input_producer(local_dataset['train'])
self.images = self._read_input_queue(filename_queue)
def _read_input(self, filename_queue):
class DataRecord(object):
pass
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
record = DataRecord()
decoded_image = tf.image.decode_jpeg(value,
channels=3)  # Assumption: color images are read and generated
# decoded_image_4d = tf.expand_dims(decoded_image, 0)
# resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.target_image_size, self.target_image_size])
# record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
decoded_image_4d = tf.expand_dims(decoded_image, 0)
resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.resized_image_size, self.resized_image_size])
record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
return record
def _read_input_queue(self, filename_queue):
print("Setting up image reader...")
read_input = self._read_input(filename_queue)
num_preprocess_threads = 4
num_examples_per_epoch = 800
min_queue_examples = int(0.1 * num_examples_per_epoch)
print("Shuffling")
input_image = tf.train.batch([read_input.input_image],
batch_size=self.batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 2 * self.batch_size
)
input_image = utils.process_image(input_image, 127.5, 127.5)
return input_image
def _generator(self, z, dims, train_phase, activation=tf.nn.relu, scope_name="generator"):
N = len(dims)
image_size = self.resized_image_size // (2 ** (N - 1))
with tf.variable_scope(scope_name) as scope:
W_z = utils.weight_variable([self.z_dim, dims[0] * image_size * image_size], name="W_z")
b_z = utils.bias_variable([dims[0] * image_size * image_size], name="b_z")
h_z = tf.matmul(z, W_z) + b_z
h_z = tf.reshape(h_z, [-1, image_size, image_size, dims[0]])
h_bnz = utils.batch_norm(h_z, dims[0], train_phase, scope="gen_bnz")
h = activation(h_bnz, name='h_z')
utils.add_activation_summary(h)
for index in range(N - 2):
image_size *= 2
W = utils.weight_variable([5, 5, dims[index + 1], dims[index]], name="W_%d" % index)
b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[index + 1]])
h_conv_t = utils.conv2d_transpose_strided(h, W, b, output_shape=deconv_shape)
h_bn = utils.batch_norm(h_conv_t, dims[index + 1], train_phase, scope="gen_bn%d" % index)
h = activation(h_bn, name='h_%d' % index)
utils.add_activation_summary(h)
image_size *= 2
W_pred = utils.weight_variable([5, 5, dims[-1], dims[-2]], name="W_pred")
b_pred = utils.bias_variable([dims[-1]], name="b_pred")
deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[-1]])
h_conv_t = utils.conv2d_transpose_strided(h, W_pred, b_pred, output_shape=deconv_shape)
pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
utils.add_activation_summary(pred_image)
return pred_image
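# Sketch of how `dims` maps to spatial resolution (hypothetical values): with
# resized_image_size=64 and dims=[1024, 512, 256, 128, 3] the projected noise
# starts at 4x4x1024 and each transposed convolution doubles the spatial size
# (4 -> 8 -> 16 -> 32 -> 64), ending in a 3-channel tanh image.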
def _discriminator(self, input_images, dims, train_phase, activation=tf.nn.relu, scope_name="discriminator",
scope_reuse=False):
N = len(dims)
with tf.variable_scope(scope_name) as scope:
if scope_reuse:
scope.reuse_variables()
h = input_images
skip_bn = True # First layer of discriminator skips batch norm
for index in range(N - 2):
W = utils.weight_variable([5, 5, dims[index], dims[index + 1]], name="W_%d" % index)
b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
h_conv = utils.conv2d_strided(h, W, b)
if skip_bn:
h_bn = h_conv
skip_bn = False
else:
h_bn = utils.batch_norm(h_conv, dims[index + 1], train_phase, scope="disc_bn%d" % index)
h = activation(h_bn, name="h_%d" % index)
utils.add_activation_summary(h)
shape = h.get_shape().as_list()
image_size = self.resized_image_size // (2 ** (N - 2)) # dims has input dim and output dim
h_reshaped = tf.reshape(h, [self.batch_size, image_size * image_size * shape[3]])
W_pred = utils.weight_variable([image_size * image_size * shape[3], dims[-1]], name="W_pred")
b_pred = utils.bias_variable([dims[-1]], name="b_pred")
h_pred = tf.matmul(h_reshaped, W_pred) + b_pred
return tf.nn.sigmoid(h_pred), h_pred, h
def _cross_entropy_loss(self, logits, labels, name="x_entropy"):
xentropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
tf.summary.scalar(name, xentropy)
return xentropy
def _get_optimizer(self, optimizer_name, learning_rate, optimizer_param):
self.learning_rate = learning_rate
if optimizer_name == "Adam":
return tf.train.AdamOptimizer(learning_rate, beta1=optimizer_param)
elif optimizer_name == "RMSProp":
return tf.train.RMSPropOptimizer(learning_rate, decay=optimizer_param)
else:
raise ValueError("Unknown optimizer %s" % optimizer_name)
def _train(self, loss_val, var_list, optimizer):
grads = optimizer.compute_gradients(loss_val, var_list=var_list)
for grad, var in grads:
utils.add_gradient_summary(grad, var)
return optimizer.apply_gradients(grads)
def _setup_placeholder(self):
self.train_phase = tf.placeholder(tf.bool)
self.z_vec = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name="z")
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
name="disc_real_loss")
discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
name="disc_fake_loss")
self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real
gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
if use_features:
gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
else:
gen_loss_features = 0
self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features
tf.summary.scalar("Discriminator_loss", self.discriminator_loss)
tf.summary.scalar("Generator_loss", self.gen_loss)
def create_network(self, generator_dims, discriminator_dims, optimizer="Adam", learning_rate=2e-4,
optimizer_param=0.9, improved_gan_loss=True):
print("Setting up model...")
self._setup_placeholder()
tf.summary.histogram("z", self.z_vec)
self.gen_images = self._generator(self.z_vec, generator_dims, self.train_phase, scope_name="generator")
tf.summary.image("image_real", self.images, max_outputs=2)
tf.summary.image("image_generated", self.gen_images, max_outputs=2)
def leaky_relu(x, name="leaky_relu"):
return utils.leaky_relu(x, alpha=0.2, name=name)
discriminator_real_prob, logits_real, feature_real = self._discriminator(self.images, discriminator_dims,
self.train_phase,
activation=leaky_relu,
scope_name="discriminator",
scope_reuse=False)
discriminator_fake_prob, logits_fake, feature_fake = self._discriminator(self.gen_images, discriminator_dims,
self.train_phase,
activation=leaky_relu,
scope_name="discriminator",
scope_reuse=True)
# utils.add_activation_summary(tf.identity(discriminator_real_prob, name='disc_real_prob'))
# utils.add_activation_summary(tf.identity(discriminator_fake_prob, name='disc_fake_prob'))
# Loss calculation
self._gan_loss(logits_real, logits_fake, feature_real, feature_fake, use_features=improved_gan_loss)
train_variables = tf.trainable_variables()
for v in train_variables:
# print (v.op.name)
utils.add_to_regularization_and_summary(var=v)
self.generator_variables = [v for v in train_variables if v.name.startswith("generator")]
# print(map(lambda x: x.op.name, generator_variables))
self.discriminator_variables = [v for v in train_variables if v.name.startswith("discriminator")]
# print(map(lambda x: x.op.name, discriminator_variables))
optim = self._get_optimizer(optimizer, learning_rate, optimizer_param)
self.generator_train_op = self._train(self.gen_loss, self.generator_variables, optim)
self.discriminator_train_op = self._train(self.discriminator_loss, self.discriminator_variables, optim)
def initialize_network(self, logs_dir):
print("Initializing network...")
self.logs_dir = logs_dir
self.sess = tf.Session(config=tfconfig)
self.summary_op = tf.summary.merge_all()
self.saver = tf.train.Saver()
self.summary_writer = tf.summary.FileWriter(self.logs_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(self.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
print("Model restored...")
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(self.sess, self.coord)
def train_model(self, max_iterations):
try:
print("Training model...")
for itr in xrange(1, max_iterations):
batch_z = np.random.uniform(-1.0, 1.0, size=[self.batch_size, self.z_dim]).astype(np.float32)
feed_dict = {self.z_vec: batch_z, self.train_phase: True}
self.sess.run(self.discriminator_train_op, feed_dict=feed_dict)
self.sess.run(self.generator_train_op, feed_dict=feed_dict)
if itr % 10 == 0:
g_loss_val, d_loss_val, summary_str = self.sess.run(
[self.gen_loss, self.discriminator_loss, self.summary_op], feed_dict=feed_dict)
print("Step: %d, generator loss: %g, discriminator_loss: %g" % (itr, g_loss_val, d_loss_val))
self.summary_writer.add_summary(summary_str, itr)
if itr % 2000 == 0:
self.saver.save(self.sess, self.logs_dir + "model.ckpt", global_step=itr)
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
except KeyboardInterrupt:
print("Ending Training...")
finally:
self.coord.request_stop()
self.coord.join(self.threads) # Wait for threads to finish.
def visualize_model(self):
print("Sampling images from model...")
batch_z = np.random.uniform(-1.0, 1.0, size=[self.batch_size, self.z_dim]).astype(np.float32)
feed_dict = {self.z_vec: batch_z, self.train_phase: False}
images = self.sess.run(self.gen_images, feed_dict=feed_dict)
images = utils.unprocess_image(images, 127.5, 127.5).astype(np.uint8)
shape = [4, self.batch_size // 4]
utils.save_imshow_grid(images, self.logs_dir, "generated.png", shape=shape)
class WasserstienGAN(GAN):
def __init__(self, z_dim, crop_image_size, resized_image_size, batch_size, data_dir, clip_values=(-0.01, 0.01),
critic_iterations=5):
self.critic_iterations = critic_iterations
self.clip_values = clip_values
GAN.__init__(self, z_dim, crop_image_size, resized_image_size, batch_size, data_dir)
def _generator(self, z, dims, train_phase, activation=tf.nn.relu, scope_name="generator"):
N = len(dims)
image_size = self.resized_image_size // (2 ** (N - 1))
with tf.variable_scope(scope_name) as scope:
W_z = utils.weight_variable([self.z_dim, dims[0] * image_size * image_size], name="W_z")
h_z = tf.matmul(z, W_z)
h_z = tf.reshape(h_z, [-1, image_size, image_size, dims[0]])
h_bnz = utils.batch_norm(h_z, dims[0], train_phase, scope="gen_bnz")
h = activation(h_bnz, name='h_z')
utils.add_activation_summary(h)
for index in range(N - 2):
image_size *= 2
W = utils.weight_variable([4, 4, dims[index + 1], dims[index]], name="W_%d" % index)
b = tf.zeros([dims[index + 1]])
deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[index + 1]])
h_conv_t = utils.conv2d_transpose_strided(h, W, b, output_shape=deconv_shape)
h_bn = utils.batch_norm(h_conv_t, dims[index + 1], train_phase, scope="gen_bn%d" % index)
h = activation(h_bn, name='h_%d' % index)
utils.add_activation_summary(h)
image_size *= 2
W_pred = utils.weight_variable([4, 4, dims[-1], dims[-2]], name="W_pred")
b = tf.zeros([dims[-1]])
deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[-1]])
h_conv_t = utils.conv2d_transpose_strided(h, W_pred, b, output_shape=deconv_shape)
pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
utils.add_activation_summary(pred_image)
return pred_image
def _discriminator(self, input_images, dims, train_phase, activation=tf.nn.relu, scope_name="discriminator",
scope_reuse=False):
N = len(dims)
with tf.variable_scope(scope_name) as scope:
if scope_reuse:
scope.reuse_variables()
h = input_images
skip_bn = True # First layer of discriminator skips batch norm
for index in range(N - 2):
W = utils.weight_variable([4, 4, dims[index], dims[index + 1]], name="W_%d" % index)
b = tf.zeros([dims[index+1]])
h_conv = utils.conv2d_strided(h, W, b)
if skip_bn:
h_bn = h_conv
skip_bn = False
else:
h_bn = utils.batch_norm(h_conv, dims[index + 1], train_phase, scope="disc_bn%d" % index)
h = activation(h_bn, name="h_%d" % index)
utils.add_activation_summary(h)
W_pred = utils.weight_variable([4, 4, dims[-2], dims[-1]], name="W_pred")
b = tf.zeros([dims[-1]])
h_pred = utils.conv2d_strided(h, W_pred, b)
return None, h_pred, None  # Return the last convolution output as critic logits; the None placeholders keep the same interface as the base GAN discriminator
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
self.discriminator_loss = tf.reduce_mean(logits_real - logits_fake)
self.gen_loss = tf.reduce_mean(logits_fake)
tf.summary.scalar("Discriminator_loss", self.discriminator_loss)
tf.summary.scalar("Generator_loss", self.gen_loss)
def train_model(self, max_iterations):
try:
print("Training Wasserstein GAN model...")
# Weight clipping keeps the critic weights in a small box, the original
# WGAN approximation of a Lipschitz constraint
clip_discriminator_var_op = [var.assign(tf.clip_by_value(var, self.clip_values[0], self.clip_values[1])) for
var in self.discriminator_variables]
start_time = time.time()
def get_feed_dict(train_phase=True):
batch_z = np.random.uniform(-1.0, 1.0, size=[self.batch_size, self.z_dim]).astype(np.float32)
feed_dict = {self.z_vec: batch_z, self.train_phase: train_phase}
return feed_dict
for itr in xrange(1, max_iterations):
# Train the critic extra heavily at the start and periodically thereafter,
# following the original WGAN training schedule
if itr < 25 or itr % 500 == 0:
critic_itrs = 25
else:
critic_itrs = self.critic_iterations
for critic_itr in range(critic_itrs):
self.sess.run(self.discriminator_train_op, feed_dict=get_feed_dict(True))
self.sess.run(clip_discriminator_var_op)
feed_dict = get_feed_dict(True)
self.sess.run(self.generator_train_op, feed_dict=feed_dict)
if itr % 100 == 0:
summary_str = self.sess.run(self.summary_op, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, itr)
if itr % 200 == 0:
stop_time = time.time()
duration = (stop_time - start_time) / 200.0
start_time = stop_time
g_loss_val, d_loss_val = self.sess.run([self.gen_loss, self.discriminator_loss],
feed_dict=feed_dict)
print("Time: %g/itr, Step: %d, generator loss: %g, discriminator_loss: %g" % (
duration, itr, g_loss_val, d_loss_val))
if itr % 5000 == 0:
self.saver.save(self.sess, self.logs_dir + "model.ckpt", global_step=itr)
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
except KeyboardInterrupt:
print("Ending Training...")
finally:
self.coord.request_stop()
self.coord.join(self.threads) # Wait for threads to finish.
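if __name__ == '__main__':
    # A minimal driver sketch; the paths, layer dims and hyperparameters below
    # are assumptions for illustration, not values shipped with this file.
    gan = GAN(z_dim=100, crop_image_size=108, resized_image_size=64,
              batch_size=64, data_dir='Data_zoo/celebA/')
    gan.create_network(generator_dims=[1024, 512, 256, 128, 3],
                       discriminator_dims=[3, 64, 128, 256, 512, 1],
                       optimizer="Adam", learning_rate=2e-4, optimizer_param=0.5)
    gan.initialize_network(logs_dir='logs/GAN_logs/')
    gan.train_model(max_iterations=10001)
    gan.visualize_model()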
|
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import relpath
from unittest import TestCase
from pybuilder.core import Project, Author, Logger
from pybuilder.plugins.python.sphinx_plugin import (assert_sphinx_is_available,
assert_sphinx_quickstart_is_available,
get_sphinx_build_command,
get_sphinx_quickstart_command,
initialize_sphinx_plugin,
run_sphinx_build,
get_sphinx_apidoc_command,
sphinx_pyb_quickstart_generate,
sphinx_generate,
generate_sphinx_apidocs)
from pybuilder.utils import np, jp
from test_utils import Mock, patch, call, ANY
class CheckSphinxAvailableTests(TestCase):
def test_should_check_that_sphinx_can_be_executed(self):
mock_project = Mock(Project)
mock_logger = Mock(Logger)
reactor = Mock()
pyb_env = Mock()
reactor.python_env_registry = {"pybuilder": pyb_env}
reactor.pybuilder_venv = pyb_env
assert_sphinx_is_available(mock_project, mock_logger, reactor)
pyb_env.verify_can_execute.assert_has_calls(
[
call(["sphinx-build", "--version"], "sphinx-build", "plugin python.sphinx"),
call(["sphinx-apidoc", "--version"], "sphinx-apidoc", "plugin python.sphinx")
]
)
def test_should_check_that_sphinx_quickstart_can_be_executed(self):
mock_project = Mock(Project)
mock_logger = Mock(Logger)
reactor = Mock()
pyb_env = Mock()
reactor.python_env_registry = {"pybuilder": pyb_env}
reactor.pybuilder_venv = pyb_env
assert_sphinx_quickstart_is_available(mock_project, mock_logger, reactor)
pyb_env.verify_can_execute.assert_called_with(
["sphinx-quickstart", "--version"], "sphinx-quickstart", "plugin python.sphinx")
class SphinxPluginInitializationTests(TestCase):
def setUp(self):
self.project = Project("basedir")
def test_should_leave_user_specified_properties_when_initializing_plugin(self):
expected_properties = {
"sphinx_source_dir": "source_dir",
"sphinx_output_dir": "output_dir",
"sphinx_config_path": "config_path",
"sphinx_doc_author": "author",
"sphinx_doc_builder": "doc_builder",
"sphinx_project_name": "project_name",
"sphinx_project_version": "project_version"
}
for property_name, property_value in expected_properties.items():
self.project.set_property(property_name, property_value)
initialize_sphinx_plugin(self.project)
for property_name, property_value in expected_properties.items():
self.assertEqual(
self.project.get_property(property_name),
property_value)
def test_should_set_default_values_when_initializing_plugin(self):
self.project.authors = [
Author("John Doe", "[email protected]"),
Author("Jane Doe", "[email protected]")]
initialize_sphinx_plugin(self.project)
self.project.set_property("sphinx_project_name", "foo")
self.project.set_property("sphinx_project_version", "1.0")
self.assertEqual(
self.project.get_property("sphinx_source_dir"), "docs")
self.assertEqual(
self.project.get_property("sphinx_output_dir"), np("docs/_build/"))
self.assertEqual(
self.project.get_property("sphinx_config_path"), "docs")
self.assertEqual(
self.project.get_property("sphinx_doc_author"), "John Doe, Jane Doe")
self.assertEqual(
self.project.get_property("sphinx_doc_builder"), "html")
self.assertEqual(
self.project.get_property("sphinx_project_name"), "foo")
self.assertEqual(
self.project.get_property("sphinx_project_version"), "1.0")
class SphinxBuildCommandTests(TestCase):
def setUp(self):
self.project = Project("basedir")
self.logger = Mock(Logger)
self.reactor = Mock()
self.pyb_env = pyb_env = Mock()
self.reactor.python_env_registry = {"pybuilder": pyb_env}
self.reactor.pybuilder_venv = pyb_env
pyb_env.execute_command.return_value = 0
pyb_env.version = (2, 7, 12, 'final', 0)
pyb_env.executable = ["/a/b"]
pyb_env.exec_dir = "/a"
def test_should_generate_sphinx_build_command_per_project_properties(self):
self.project.set_property("sphinx_config_path", "docs/")
self.project.set_property("sphinx_source_dir", "docs/")
self.project.set_property("sphinx_output_dir", "docs/_build/")
self.project.set_property("sphinx_doc_builder", "JSONx")
sphinx_build_command = get_sphinx_build_command(self.project, Mock(), "JSONx")
self.assertEqual(sphinx_build_command,
[ANY, "-b", "JSONx",
np(jp(self.project.basedir, "docs/")),
np(jp(self.project.basedir, "docs/_build/"))])
def test_should_generate_sphinx_build_command_verbose(self):
self.project.set_property("sphinx_config_path", "docs/")
self.project.set_property("sphinx_source_dir", "docs/")
self.project.set_property("sphinx_output_dir", "docs/_build/")
self.project.set_property("sphinx_doc_builder", "JSONx")
self.project.set_property("verbose", True)
sphinx_build_command = get_sphinx_build_command(self.project, Mock(), "JSONx")
self.assertEqual(sphinx_build_command,
[ANY, "-b", "JSONx", "-v",
np(jp(self.project.basedir, "docs/")),
np(jp(self.project.basedir, "docs/_build/"))])
def test_should_generate_sphinx_build_command_debug(self):
self.project.set_property("sphinx_config_path", "docs/")
self.project.set_property("sphinx_source_dir", "docs/")
self.project.set_property("sphinx_output_dir", "docs/_build/")
self.project.set_property("sphinx_doc_builder", "JSONx")
logger = Mock()
logger.level = 1
logger.DEBUG = 1
sphinx_build_command = get_sphinx_build_command(self.project, logger, "JSONx")
self.assertEqual(sphinx_build_command,
[ANY, "-b", "JSONx", "-vvvv",
np(jp(self.project.basedir, "docs/")),
np(jp(self.project.basedir, "docs/_build/"))])
def test_should_generate_sphinx_build_command_forced_builder_dir(self):
self.project.set_property("sphinx_config_path", "docs/")
self.project.set_property("sphinx_source_dir", "docs/")
self.project.set_property("sphinx_output_dir", "docs/_build/")
self.project.set_property("sphinx_doc_builder", "JSONx")
self.project.set_property("sphinx_output_per_builder", True)
sphinx_build_command = get_sphinx_build_command(self.project, Mock(), "JSONx")
self.assertEqual(sphinx_build_command,
[ANY, "-b", "JSONx",
np(jp(self.project.basedir, "docs/")),
np(jp(self.project.basedir, "docs/_build/JSONx"))])
def test_should_generate_sphinx_build_command_builder_dir(self):
self.project.set_property("sphinx_config_path", "docs/")
self.project.set_property("sphinx_source_dir", "docs/")
self.project.set_property("sphinx_output_dir", "docs/_build/")
self.project.set_property("sphinx_doc_builder", ["JSONx", "pdf"])
sphinx_build_command = get_sphinx_build_command(self.project, Mock(), "JSONx")
self.assertEqual(sphinx_build_command,
[ANY, "-b", "JSONx",
np(jp(self.project.basedir, "docs/")),
np(jp(self.project.basedir, "docs/_build/JSONx"))
])
def test_should_generate_sphinx_quickstart_command_with_project_properties(self):
self.project.set_property("sphinx_doc_author", "bar")
self.project.set_property("sphinx_project_name", "foo")
self.project.set_property("sphinx_project_version", "3")
self.project.set_property("sphinx_source_dir", "docs/")
sphinx_quickstart_command = get_sphinx_quickstart_command(self.project)
self.assertEqual(sphinx_quickstart_command,
[ANY, "-q", "-p", "foo", "-a", "bar", "-v", "3",
np(jp(self.project.basedir, "docs/"))
])
def test_should_execute_command_regardless_of_verbose(self):
self.project.set_property("verbose", True)
self.project.set_property("dir_target", "spam")
initialize_sphinx_plugin(self.project)
run_sphinx_build(["foo"], "bar", Mock(), self.project, self.reactor)
self.assertEqual(self.pyb_env.execute_command.call_count, 1)
def test_get_sphinx_apidoc_command_enabled(self):
sphinx_mock = Mock()
sys.modules["sphinx"] = sphinx_mock
try:
sphinx_mock.version_info = (1, 2, 3, 4, 5)
self.project.set_property("sphinx_run_apidoc", True)
self.project.set_property("dir_target", "dir_target")
self.project.set_property("dir_source_main_python", "dir_source")
self.project.set_property("sphinx_project_name", "project_name")
self.assertEqual(get_sphinx_apidoc_command(self.project, self.reactor),
[ANY,
"-H",
"project_name",
"-o",
np(jp(self.project.basedir, "dir_target/sphinx_pyb/apidoc")),
np(jp(self.project.basedir, "dir_source"))
]
)
finally:
del sys.modules["sphinx"]
def test_get_sphinx_apidoc_command_enabled_with_pep420(self):
sphinx_mock = Mock()
sys.modules["sphinx"] = sphinx_mock
try:
sphinx_mock.version_info = (1, 5, 3, 4, 5)
self.project.set_property("sphinx_run_apidoc", True)
self.project.set_property("dir_target", "dir_target")
self.project.set_property("dir_source_main_python", "dir_source")
self.project.set_property("sphinx_project_name", "project_name")
self.assertEqual(get_sphinx_apidoc_command(self.project, self.reactor),
[ANY,
"-H",
"project_name",
"-o",
np(jp(self.project.basedir, "dir_target/sphinx_pyb/apidoc")),
np(jp(self.project.basedir, "dir_source"))
]
)
self.reactor.pybuilder_venv.version = (3, 5, 6, 'final', 0)
self.assertEqual(get_sphinx_apidoc_command(self.project, self.reactor),
[ANY,
"-H",
"project_name",
"--implicit-namespaces",
"-o",
np(jp(self.project.basedir, "dir_target/sphinx_pyb/apidoc")),
np(jp(self.project.basedir, "dir_source"))
])
finally:
del sys.modules["sphinx"]
@patch("pybuilder.plugins.python.sphinx_plugin.open", create=True)
@patch("pybuilder.plugins.python.sphinx_plugin.rmtree")
@patch("pybuilder.plugins.python.sphinx_plugin.exists")
@patch("pybuilder.plugins.python.sphinx_plugin.mkdir")
@patch("pybuilder.plugins.python.sphinx_plugin.symlink")
def test_sphinx_pyb_quickstart_generate(self,
symlink,
mkdir,
exists,
rmtree,
open
):
exists.return_value = False
self.project.set_property("sphinx_source_dir", "sphinx_source_dir")
self.project.set_property("sphinx_config_path", "sphinx_config_path")
self.project.set_property("dir_target", "dir_target")
self.project.set_property("dir_source_main_python", "dir_source")
self.project.set_property("sphinx_project_name", "project_name")
sphinx_pyb_quickstart_generate(self.project, Mock(), self.reactor)
open().__enter__().write.assert_called_with("""\
# Automatically generated by PyB
import sys
from os.path import normcase as nc, normpath as np, join as jp, dirname, exists
sphinx_pyb_dir = nc(np(jp(dirname(__file__) if __file__ else '.', %r)))
sphinx_pyb_module = 'sphinx_pyb_conf'
sphinx_pyb_module_file = nc(np(jp(sphinx_pyb_dir, sphinx_pyb_module + '.py')))
sys.path.insert(0, sphinx_pyb_dir)
if not exists(sphinx_pyb_module_file):
raise RuntimeError("No PyB-based Sphinx configuration found in " + sphinx_pyb_module_file)
from sphinx_pyb_conf import *
# Overwrite PyB-settings here statically if that's the thing that you want
""" % relpath(np(jp(self.project.basedir, "../dir_target/sphinx_pyb")), self.project.basedir))
symlink.assert_called_with(relpath(np(jp(self.project.basedir, "../dir_target/sphinx_pyb/apidoc")),
self.project.basedir),
np(jp(self.project.basedir, "sphinx_source_dir/apidoc")),
target_is_directory=True)
@patch("pybuilder.plugins.python.sphinx_plugin.open", create=True)
@patch("pybuilder.plugins.python.sphinx_plugin.rmtree")
@patch("pybuilder.plugins.python.sphinx_plugin.exists")
@patch("pybuilder.plugins.python.sphinx_plugin.mkdir")
def test_sphinx_generate(self,
mkdir,
exists,
rmtree,
open
):
exists.return_value = True
sphinx_mock = Mock()
sys.modules["sphinx"] = sphinx_mock
try:
sphinx_mock.version_info = (1, 5, 3, 4, 5)
self.project.set_property("sphinx_source_dir", "sphinx_source_dir")
self.project.set_property("sphinx_config_path", "sphinx_config_path")
self.project.set_property("sphinx_output_dir", "sphinx_output_dir")
self.project.set_property("dir_target", "dir_target")
self.project.set_property("dir_source_main_python", "dir_source")
self.project.set_property("sphinx_project_name", "project_name")
self.project.set_property("sphinx_project_conf", {"a": 1, "b": "foo"})
self.project.set_property("sphinx_run_apidoc", True)
self.project.set_property("sphinx_doc_builder", ["JSONx", "pdf"])
sphinx_generate(self.project, Mock(), self.reactor)
finally:
del sys.modules["sphinx"]
exists.assert_called_with(np(jp(self.project.basedir, "dir_target/sphinx_pyb/apidoc")))
rmtree.assert_called_with(np(jp(self.project.basedir, "dir_target/sphinx_pyb/apidoc")))
mkdir.assert_called_with(np(jp(self.project.basedir, "dir_target/sphinx_pyb/apidoc")))
open().__enter__().write.assert_has_calls([call("a = 1\n"), call("b = 'foo'\n"), call(
"\nimport sys\nsys.path.insert(0, %r)\n" % np(jp(self.project.basedir, "dir_source")))], any_order=True)
self.pyb_env.execute_command.assert_has_calls([
call(self.reactor.pybuilder_venv.executable + ["-c", ANY,
"-H", "project_name", "-o",
np(jp(self.project.basedir, "dir_target/sphinx_pyb/apidoc")),
np(jp(self.project.basedir, "dir_source"))],
np(jp(self.project.basedir, "dir_target/reports/sphinx-apidoc")), shell=False),
call(self.reactor.pybuilder_venv.executable + ["-c", ANY, "-b", "JSONx",
np(jp(self.project.basedir, "sphinx_config_path")),
np(jp(self.project.basedir, "sphinx_output_dir/JSONx"))],
np(jp(self.project.basedir, "dir_target/reports/sphinx_JSONx")), shell=False),
call(self.reactor.pybuilder_venv.executable + ["-c", ANY, "-b", "pdf",
np(jp(self.project.basedir, "sphinx_config_path")),
np(jp(self.project.basedir, "sphinx_output_dir/pdf"))],
np(jp(self.project.basedir, "dir_target/reports/sphinx_pdf")), shell=False)])
@patch("pybuilder.plugins.python.sphinx_plugin.open", create=True)
@patch("pybuilder.plugins.python.sphinx_plugin.rmtree")
@patch("pybuilder.plugins.python.sphinx_plugin.exists")
@patch("pybuilder.plugins.python.sphinx_plugin.mkdir")
def test_apidoc_does_not_run_when_off(self,
mkdir,
exists,
rmtree,
open
):
self.project.set_property("sphinx_run_apidoc", False)
generate_sphinx_apidocs(self.project, Mock(), self.reactor)
exists.assert_not_called()
|
|
"""
FlexGet build and development utilities - unfortunately this file is somewhat messy
"""
from __future__ import print_function
import glob
import os
import shutil
import sys
from paver.easy import environment, task, cmdopts, Bunch, path, call_task, might_call, consume_args
# These 2 packages do magic on import, even though they aren't used explicitly
import paver.virtual
import paver.setuputils
from paver.shell import sh
from paver.setuputils import setup, find_package_data, find_packages
sphinxcontrib = False
try:
from sphinxcontrib import paverutils
sphinxcontrib = True
except ImportError:
pass
sys.path.insert(0, '')
options = environment.options
install_requires = [
'FeedParser>=5.2.1',
# There is a bug in sqlalchemy 0.9.0, see gh#127
'SQLAlchemy >=0.7.5, !=0.9.0, <1.999',
'PyYAML',
# There is a bug in beautifulsoup 4.2.0 that breaks imdb parsing, see http://flexget.com/ticket/2091
'beautifulsoup4>=4.1, !=4.2.0, <4.5',
'html5lib>=0.11',
'PyRSS2Gen',
'pynzb',
'progressbar',
'rpyc',
'jinja2',
# There is a bug in requests 2.4.0 where it leaks urllib3 exceptions
'requests>=1.0, !=2.4.0, <2.99',
'python-dateutil!=2.0, !=2.2',
'jsonschema>=2.0',
'tmdb3',
'path.py',
'guessit>=0.9.3, <0.10.4',
'apscheduler',
'flask>=0.7',
'flask-restful>=0.3.3',
'ordereddict>=1.1',
'flask-restplus==0.7.2',
'cherrypy>=3.7.0',
'flask-assets>=0.11',
'cssmin>=0.2.0',
'flask-compress>=1.2.1',
'flask-login>=0.3.2',
'pyparsing>=2.0.3',
'pyScss>=1.3.4',
'pytvmaze>=1.4.3'
]
if sys.version_info < (2, 7):
# argparse is part of the standard library in python 2.7+
install_requires.append('argparse')
entry_points = {'console_scripts': ['flexget = flexget:main']}
# Provide an alternate exe on windows which does not cause a pop-up when scheduled
if sys.platform.startswith('win'):
entry_points.setdefault('gui_scripts', []).append('flexget-headless = flexget:main')
with open("README.rst") as readme:
long_description = readme.read()
# Populates __version__ without importing the package
__version__ = None
execfile('flexget/_version.py')
if not __version__:
print('Could not find __version__ from flexget/_version.py')
sys.exit(1)
setup(
name='FlexGet',
version=__version__, # release task may edit this
description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
'from different sources like RSS-feeds, html-pages, various sites and more.',
long_description=long_description,
author='Marko Koivusalo',
author_email='[email protected]',
license='MIT',
url='http://flexget.com',
download_url='http://download.flexget.com',
install_requires=install_requires,
packages=find_packages(exclude=['tests']),
package_data=find_package_data('flexget', package='flexget',
exclude=['FlexGet.egg-info', '*.pyc'],
exclude_directories=['node_modules', 'bower_components'],
only_in_packages=False), # NOTE: the exclude does not seem to work
zip_safe=False,
test_suite='nose.collector',
extras_require={
'memusage': ['guppy'],
'NZB': ['pynzb'],
'TaskTray': ['pywin32'],
},
entry_points=entry_points,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
)
options(
minilib=Bunch(
# 'version' is included as workaround to https://github.com/paver/paver/issues/112, TODO: remove
extra_files=['virtual', 'svn', 'version']
),
virtualenv=Bunch(
paver_command_line='develop'
),
# sphinxcontrib.paverutils
sphinx=Bunch(
docroot='docs',
builddir='build',
builder='html',
confdir='docs'
),
)
def set_init_version(ver):
"""Replaces the version with ``ver`` in _version.py"""
import fileinput
for line in fileinput.FileInput('flexget/_version.py', inplace=1):
if line.startswith('__version__ = '):
line = "__version__ = '%s'\n" % ver
print(line, end='')
@task
def version():
"""Prints the version number of the source"""
print(__version__)
@task
@cmdopts([('dev', None, 'Bumps to new development version instead of release version.')])
def increment_version(options):
"""Increments either release or dev version by 1"""
print('current version: %s' % __version__)
ver_split = __version__.split('.')
dev = options.increment_version.get('dev')
if 'dev' in ver_split[-1]:
if dev:
# If this is already a development version, increment the dev count by 1
ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
else:
# Just strip off dev tag for next release version
ver_split = ver_split[:-1]
else:
# Increment the revision number by one
if len(ver_split) == 2:
# We don't have a revision number, assume 0
ver_split.append('1')
else:
ver_split[-1] = str(int(ver_split[-1]) + 1)
if dev:
ver_split.append('dev')
new_version = '.'.join(ver_split)
print('new version: %s' % new_version)
set_init_version(new_version)
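# Transition examples for the logic above:
#   1.2.3     -> 1.2.4       (release bump)
#   1.2.3     -> 1.2.4.dev   (with --dev)
#   1.2.4.dev -> 1.2.4.dev1  (with --dev)
#   1.2.4.dev -> 1.2.4       (release bump strips the dev tag)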
@task
@cmdopts([
('online', None, 'Run online tests')
])
def test(options):
"""Run FlexGet unit tests"""
options.setdefault('test', Bunch())
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
args = []
# Adding the -v flag makes the tests fail in python 2.7
#args.append('-v')
args.append('--processes=4')
args.append('-x')
if not options.test.get('online'):
args.append('--attr=!online')
args.append('--where=tests')
# Store current path since --where changes it, restore when leaving
cwd = os.getcwd()
try:
return nose.run(argv=args, config=cfg)
finally:
os.chdir(cwd)
@task
def clean():
"""Cleans up the virtualenv"""
for p in ('bin', 'Scripts', 'build', 'dist', 'include', 'lib', 'man',
'share', 'FlexGet.egg-info', 'paver-minilib.zip', 'setup.py'):
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
for pkg in set(options.setup.packages) | set(('tests',)):
for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"):
path(filename).remove()
@task
@cmdopts([
('dist-dir=', 'd', 'directory to put final built distributions in'),
('revision=', 'r', 'minor revision number of this build')
])
def sdist(options):
"""Build tar.gz distribution package"""
print('sdist version: %s' % __version__)
# clean previous build
print('Cleaning build...')
for p in ['build']:
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
else:
print('Unable to remove %s' % pth)
# remove pre-compiled pycs from tests, I don't know why paver even tries to include them ...
# seems to happen only with sdist though
for pyc in path('tests/').files('*.pyc'):
pyc.remove()
for t in ['minilib', 'generate_setup', 'setuptools.command.sdist']:
call_task(t)
@task
def coverage():
"""Make coverage.flexget.com"""
# --with-coverage --cover-package=flexget --cover-html --cover-html-dir /var/www/flexget_coverage/
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
argv = ['bin/paver']
argv.extend(['--attr=!online'])
argv.append('--with-coverage')
argv.append('--cover-html')
argv.extend(['--cover-package', 'flexget'])
argv.extend(['--cover-html-dir', '/var/www/flexget_coverage/'])
nose.run(argv=argv, config=cfg)
print('Coverage generated')
@task
@cmdopts([
('docs-dir=', 'd', 'directory to put the documentation in')
])
def docs():
if not sphinxcontrib:
print('ERROR: requires sphinxcontrib-paverutils')
sys.exit(1)
from paver import tasks
if not os.path.exists('build'):
os.mkdir('build')
if not os.path.exists(os.path.join('build', 'sphinx')):
os.mkdir(os.path.join('build', 'sphinx'))
setup_section = tasks.environment.options.setdefault("sphinx", Bunch())
setup_section.update(outdir=options.docs.get('docs_dir', 'build/sphinx'))
call_task('sphinxcontrib.paverutils.html')
@task
@might_call('test', 'sdist')
@cmdopts([('no-tests', None, 'skips unit tests')])
def release(options):
"""Run tests then make an sdist if successful."""
if not options.release.get('no_tests'):
if not test():
print('Unit tests did not pass')
sys.exit(1)
print('Making src release')
sdist()
@task
def install_tools():
"""Install development / jenkins tools and dependencies"""
try:
import pip
except ImportError:
print('FATAL: Unable to import pip, please install it and run this again!')
sys.exit(1)
try:
import sphinxcontrib
print('sphinxcontrib INSTALLED')
except ImportError:
pip.main(['install', 'sphinxcontrib-paverutils'])
pip.main(['install', '-r', 'jenkins-requirements.txt'])
@task
def clean_compiled():
for root, dirs, files in os.walk('flexget'):
for name in files:
fqn = os.path.join(root, name)
if fqn[-3:] == 'pyc' or fqn[-3:] == 'pyo' or fqn[-5:] == 'cover':
print('Deleting %s' % fqn)
os.remove(fqn)
@task
@consume_args
def pep8(args):
try:
import pep8
except ImportError:
print('Run bin/paver install_tools')
sys.exit(1)
# Ignoring certain errors
ignore = [
'E711', 'E712', # These are comparisons to singletons i.e. == False, and == None. We need these for sqlalchemy.
'W291', 'W293', 'E261',
'E128' # E128 continuation line under-indented for visual indent
]
styleguide = pep8.StyleGuide(show_source=True, ignore=ignore, repeat=1, max_line_length=120,
parse_argv=args)
styleguide.input_dir('flexget')
@task
@cmdopts([
('file=', 'f', 'name of the requirements file to create')
])
def requirements(options):
filename = options.requirements.get('file', 'requirements.txt')
with open(filename, mode='w') as req_file:
req_file.write('\n'.join(options.install_requires))
@task
def build_webui():
cwd = os.path.join('flexget', 'ui')
# Cleanup previous builds
for folder in ['bower_components', 'node_modules']:
folder = os.path.join(cwd, folder)
if os.path.exists(folder):
shutil.rmtree(folder)
# Install npm packages
sh(['npm', 'install'], cwd=cwd)
# Install bower packages
sh(['bower', 'install'], cwd=cwd)
# Build the ui
sh('gulp', cwd=cwd)
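# Typical invocations of the tasks above (assuming paver is installed and run
# from the project root):
#   paver test --online
#   paver increment_version --dev
#   paver sdist --dist-dir=dist --revision=42
#   paver requirements --file=requirements.txt
#   paver docs --docs-dir=build/docs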
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
import struct
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import UDP,TCP
from scapy.layers.l2 import SourceMACField
class NetBIOS_DS(Packet):
name = "NetBIOS datagram service"
fields_desc = [
ByteEnumField("type",17, {17:"direct_group"}),
ByteField("flags",0),
XShortField("id",0),
IPField("src","127.0.0.1"),
ShortField("sport",138),
ShortField("len",None),
ShortField("ofs",0),
NetBIOSNameField("srcname",""),
NetBIOSNameField("dstname",""),
]
def post_build(self, p, pay):
p += pay
if self.len is None:
l = len(p)-14
p = p[:10]+struct.pack("!H", l)+p[12:]
return p
# ShortField("length",0),
# ShortField("Delimitor",0),
# ByteField("command",0),
# ByteField("data1",0),
# ShortField("data2",0),
# ShortField("XMIt",0),
# ShortField("RSPCor",0),
# StrFixedLenField("dest","",16),
# StrFixedLenField("source","",16),
#
# ]
#
#NetBIOS
# Name Query Request
# Node Status Request
class NBNSQueryRequest(Packet):
name="NBNS query request"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x0110),
ShortField("QDCOUNT",1),
ShortField("ANCOUNT",0),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("QUESTION_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"})]
# Name Registration Request
# Name Refresh Request
# Name Release Request or Demand
class NBNSRequest(Packet):
name="NBNS request"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x2910),
ShortField("QDCOUNT",1),
ShortField("ANCOUNT",0),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",1),
NetBIOSNameField("QUESTION_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
ShortEnumField("RR_NAME",0xC00C,{0xC00C:"Label String Pointer to QUESTION_NAME"}),
ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL", 0),
ShortField("RDLENGTH", 6),
BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
BitEnumField("OWNER_NODE_TYPE",00,2,{00:"B node",01:"P node",02:"M node",03:"H node"}),
BitEnumField("UNUSED",0,13,{0:"Unused"}),
IPField("NB_ADDRESS", "127.0.0.1")]
# Name Query Response
# Name Registration Response
class NBNSQueryResponse(Packet):
name="NBNS query response"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x8500),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
IntField("TTL", 0x493e0),
ShortField("RDLENGTH", 6),
ShortField("NB_FLAGS", 0),
IPField("NB_ADDRESS", "127.0.0.1")]
# Name Query Response (negative)
# Name Release Response
class NBNSQueryResponseNegative(Packet):
name="NBNS query response (negative)"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x8506),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL",0),
ShortField("RDLENGTH",6),
BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
BitEnumField("OWNER_NODE_TYPE",00,2,{00:"B node",01:"P node",02:"M node",03:"H node"}),
BitEnumField("UNUSED",0,13,{0:"Unused"}),
IPField("NB_ADDRESS", "127.0.0.1")]
# Node Status Response
class NBNSNodeStatusResponse(Packet):
name="NBNS Node Status Response"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0x8500),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("RR_TYPE",0x21, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL",0),
ShortField("RDLENGTH",83),
ByteField("NUM_NAMES",1)]
# Service for Node Status Response
class NBNSNodeStatusResponseService(Packet):
name="NBNS Node Status Response Service"
fields_desc = [StrFixedLenField("NETBIOS_NAME","WINDOWS ",15),
ByteEnumField("SUFFIX",0,{0:"workstation",0x03:"messenger service",0x20:"file server service",0x1b:"domain master browser",0x1c:"domain controller", 0x1e:"browser election service"}),
ByteField("NAME_FLAGS",0x4),
ByteEnumField("UNUSED",0,{0:"unused"})]
# End of Node Status Response packet
class NBNSNodeStatusResponseEnd(Packet):
name="NBNS Node Status Response"
fields_desc = [SourceMACField("MAC_ADDRESS"),
BitField("STATISTICS",0,57*8)]
# Wait for Acknowledgement Response
class NBNSWackResponse(Packet):
name="NBNS Wait for Acknowledgement Response"
fields_desc = [ShortField("NAME_TRN_ID",0),
ShortField("FLAGS", 0xBC07),
ShortField("QDCOUNT",0),
ShortField("ANCOUNT",1),
ShortField("NSCOUNT",0),
ShortField("ARCOUNT",0),
NetBIOSNameField("RR_NAME","windows"),
ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
IntField("TTL", 2),
ShortField("RDLENGTH",2),
BitField("RDATA",10512,16)] #10512=0010100100010000
class NBTDatagram(Packet):
name="NBT Datagram Packet"
fields_desc= [ByteField("Type", 0x10),
ByteField("Flags", 0x02),
ShortField("ID", 0),
IPField("SourceIP", "127.0.0.1"),
ShortField("SourcePort", 138),
ShortField("Length", 272),
ShortField("Offset", 0),
NetBIOSNameField("SourceName","windows"),
ShortEnumField("SUFFIX1",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0),
NetBIOSNameField("DestinationName","windows"),
ShortEnumField("SUFFIX2",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
ByteField("NULL",0)]
class NBTSession(Packet):
name="NBT Session Packet"
fields_desc= [ByteEnumField("TYPE",0,{0x00:"Session Message",0x81:"Session Request",0x82:"Positive Session Response",0x83:"Negative Session Response",0x84:"Retarget Session Response",0x85:"Session Keepalive"}),
BitField("RESERVED",0x00,7),
BitField("LENGTH",0,17)]
bind_layers( UDP, NBNSQueryRequest, dport=137)
bind_layers( UDP, NBNSRequest, dport=137)
bind_layers( UDP, NBNSQueryResponse, sport=137)
bind_layers( UDP, NBNSQueryResponseNegative, sport=137)
bind_layers( UDP, NBNSNodeStatusResponse, sport=137)
bind_layers( NBNSNodeStatusResponse, NBNSNodeStatusResponseService, )
bind_layers( NBNSNodeStatusResponseService, NBNSNodeStatusResponseService, )
bind_layers( NBNSNodeStatusResponseService, NBNSNodeStatusResponseEnd, )
bind_layers( UDP, NBNSWackResponse, sport=137)
bind_layers( UDP, NBTDatagram, dport=138)
bind_layers( TCP, NBTSession, dport=139)
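# Example usage (a hedged sketch, not part of the original module): building an
# NBNS name query for inspection could look like the following; note that IP
# would additionally need to be imported from scapy.layers.inet, and sending
# would go through send() or sr1():
#     pkt = IP(dst="192.168.1.255")/UDP(sport=137, dport=137)/NBNSQueryRequest(QUESTION_NAME="WORKGROUP")
#     pkt.show2()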
|
|
import unittest
from Orange.data import Table, Variable, ContinuousVariable
from Orange.preprocess.continuize import DomainContinuizer
from Orange.preprocess import Continuize
from Orange.preprocess import transformation
class ContinuizerTest(unittest.TestCase):
def setUp(self):
Variable._clear_all_caches()
self.data = Table("test4")
def test_default(self):
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(inp)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertIs(dom.class_var, self.data.domain.class_var)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=a", "d2=b", "d3=a", "d3=b",
"d3=c"])
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator)
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 1, 0, 1, 0, 0, "a"])
self.assertEqual(dat2[1], [0, 0, 0, 1, 0, 1, 0, "b"])
self.assertEqual(dat2[2], [2, 2, 0, 1, 0, 0, 1, "c"])
def test_continuous(self):
self.assertRaises(TypeError, DomainContinuizer, self.data.domain,
normalize_continuous=Continuize.NormalizeBySpan)
domZB = DomainContinuizer(
self.data, normalize_continuous=Continuize.NormalizeBySpan,
zero_based=True)
dom = DomainContinuizer(
self.data, normalize_continuous=Continuize.NormalizeBySpan,
zero_based=False)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in domZB.attributes))
self.assertIs(domZB.class_var, self.data.domain.class_var)
self.assertIsNot(domZB[0], self.data.domain[0])
self.assertIsNot(domZB[1], self.data.domain[1])
self.assertEqual([attr.name for attr in domZB.attributes],
["c1", "c2", "d2=a", "d2=b", "d3=a", "d3=b",
"d3=c"])
self.assertIsInstance(domZB[2].compute_value, transformation.Indicator)
dat2 = Table(domZB, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [0.5, 0, 1, 0, 1, 0, 0, "a"])
self.assertEqual(dat2[1], [0, 0.5, 0, 1, 0, 1, 0, "b"])
self.assertEqual(dat2[2], [1, 1, 0, 1, 0, 0, 1, "c"])
self.assertIs(dom.class_var, self.data.domain.class_var)
self.assertIsNot(dom[0], self.data.domain[0])
self.assertIsNot(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=a", "d2=b", "d3=a", "d3=b",
"d3=c"])
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator1)
dat3 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat3[0], [0, -1, 1, -1, 1, -1, -1, "a"])
self.assertEqual(dat3[1], [-1, 0, -1, 1, -1, 1, -1, "b"])
self.assertEqual(dat3[2], [1, 1, -1, 1, -1, -1, 1, "c"])
def test_continuous_by_standard_deviation(self):
self.assertRaises(TypeError, DomainContinuizer, self.data.domain,
normalize_continuous=Continuize.NormalizeBySD)
domZB = DomainContinuizer(
self.data, normalize_continuous=Continuize.NormalizeBySD,
zero_based=True)
dom = DomainContinuizer(
self.data, normalize_continuous=Continuize.NormalizeBySD,
zero_based=False)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in domZB.attributes))
self.assertIs(domZB.class_var, self.data.domain.class_var)
self.assertIsNot(domZB[0], self.data.domain[0])
self.assertIsNot(domZB[1], self.data.domain[1])
self.assertEqual([attr.name for attr in domZB.attributes],
["c1", "c2", "d2=a", "d2=b", "d3=a", "d3=b",
"d3=c"])
self.assertIsInstance(domZB[2].compute_value,
transformation.Indicator)
solution = [[ 0, -1.225, 1, 0, 1, 0, 0, 0],
[-1.225, 0, 0, 1, 0, 1, 0, 1],
[ 1.225, 1.225, 0, 1, 0, 0, 1, 2]]
        # Whole-row comparisons with assertEqual fail here because of floating
        # point rounding, so compare element-wise with assertAlmostEqual.
dat2 = Table(domZB, self.data)
for rd,rs in zip(dat2, solution):
for x,y in zip(rd,rs):
self.assertAlmostEqual(x,y, places=3)
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator1)
dat3 = Table(dom, self.data)
self.assertEqual(list(dat2.X[0,:2]),list(dat3.X[0,:2]))
self.assertEqual(list(dat2.X[1,:2]),list(dat3.X[1,:2]))
self.assertEqual(list(dat2.X[2,:2]),list(dat3.X[2,:2]))
def test_continuous_transform_class(self):
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(inp, transform_class=True)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom))
self.assertIsNot(dom.class_var, self.data.domain.class_var)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=a", "d2=b", "d3=a", "d3=b",
"d3=c"])
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator)
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 1, 0, 1, 0, 0, 1, 0, 0])
self.assertEqual(dat2[1], [0, 0, 0, 1, 0, 1, 0, 0, 1, 0])
self.assertEqual(dat2[2], [2, 2, 0, 1, 0, 0, 1, 0, 0, 1])
def test_continuous_transform_class_minus_one(self):
self.assertRaises(TypeError, DomainContinuizer,
self.data.domain, normalize_continuous=True)
dom = DomainContinuizer(
self.data,
normalize_continuous=Continuize.NormalizeBySpan,
transform_class=True, zero_based=False)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom))
self.assertIsNot(dom.class_var, self.data.domain.class_var)
self.assertIsNot(dom[0], self.data.domain[0])
self.assertIsNot(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=a", "d2=b", "d3=a", "d3=b",
"d3=c"])
self.assertEqual([attr.name for attr in dom.class_vars],
["cl1=a", "cl1=b", "cl1=c"])
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator1)
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [0, -1, 1, -1, 1, -1, -1, 1, -1, -1])
self.assertEqual(dat2[1], [-1, 0, -1, 1, -1, 1, -1, -1, 1, -1])
self.assertEqual(dat2[2], [1, 1, -1, 1, -1, -1, 1, -1, -1, 1])
def test_multi_indicators(self):
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(inp,
multinomial_treatment=Continuize.Indicators)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertIs(dom.class_var, self.data.domain.class_var)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=a", "d2=b", "d3=a", "d3=b",
"d3=c"])
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator)
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 1, 0, 1, 0, 0, "a"])
self.assertEqual(dat2[1], [0, 0, 0, 1, 0, 1, 0, "b"])
self.assertEqual(dat2[2], [2, 2, 0, 1, 0, 0, 1, "c"])
def test_multi_lowest_base(self):
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(
inp, multinomial_treatment=Continuize.FirstAsBase)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertIs(dom.class_var, self.data.domain.class_var)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=b", "d3=b", "d3=c"])
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator)
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 0, 0, 0, "a"])
self.assertEqual(dat2[1], [0, 0, 1, 1, 0, "b"])
self.assertEqual(dat2[2], [2, 2, 1, 0, 1, "c"])
def test_multi_lowest_base_base(self):
self.data.domain[4].base_value=1
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(
inp, multinomial_treatment=Continuize.FirstAsBase)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertIs(dom.class_var, self.data.domain.class_var)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=b", "d3=a", "d3=c"])
self.assertIsInstance(dom[2].compute_value,
transformation.Indicator)
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 0, 1, 0, "a"])
self.assertEqual(dat2[1], [0, 0, 1, 0, 0, "b"])
self.assertEqual(dat2[2], [2, 2, 1, 0, 1, "c"])
def test_multi_ignore(self):
dom = DomainContinuizer(self.data.domain,
multinomial_treatment=Continuize.Remove)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2"])
def test_multi_ignore_class(self):
dom = DomainContinuizer(self.data.domain,
multinomial_treatment=Continuize.Remove,
transform_class=True)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2"])
self.assertEqual(len(dom.class_vars), 0)
self.assertIsNone(dom.class_var)
def test_multi_ignore_multi(self):
dom = DomainContinuizer(
self.data.domain,
multinomial_treatment=Continuize.RemoveMultinomial)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertEqual([attr.name for attr in dom],
["c1", "c2", "d2=b", "cl1"])
    def test_multi_ignore_multi_class(self):
dom = DomainContinuizer(
self.data.domain,
multinomial_treatment=Continuize.RemoveMultinomial,
transform_class=True)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertEqual([attr.name for attr in dom.attributes],
["c1", "c2", "d2=b"])
self.assertEqual(len(dom.class_vars), 0)
self.assertIsNone(dom.class_var)
def test_multi_error(self):
self.assertRaises(ValueError, DomainContinuizer,
self.data.domain,
multinomial_treatment=Continuize.ReportError)
def test_as_ordinal(self):
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(
inp, multinomial_treatment=Continuize.AsOrdinal)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertIs(dom.class_var, self.data.domain.class_var)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom],
["c1", "c2", "d2", "d3", "cl1"])
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 0, 0, "a"])
self.assertEqual(dat2[1], [0, 0, 1, 1, "b"])
self.assertEqual(dat2[2], [2, 2, 1, 2, "c"])
def test_as_ordinal_class(self):
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(
inp, multinomial_treatment=Continuize.AsOrdinal,
transform_class=True)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertIsInstance(dom.class_var, ContinuousVariable)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom],
["c1", "c2", "d2", "d3", "cl1"])
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 0, 0, 0])
self.assertEqual(dat2[1], [0, 0, 1, 1, 1])
self.assertEqual(dat2[2], [2, 2, 1, 2, 2])
def test_as_normalized_ordinal(self):
for inp in (self.data, self.data.domain):
dom = DomainContinuizer(
inp, multinomial_treatment=Continuize.AsNormalizedOrdinal)
self.assertTrue(all(isinstance(attr, ContinuousVariable)
for attr in dom.attributes))
self.assertIs(dom.class_var, self.data.domain.class_var)
self.assertIs(dom[0], self.data.domain[0])
self.assertIs(dom[1], self.data.domain[1])
self.assertEqual([attr.name for attr in dom],
["c1", "c2", "d2", "d3", "cl1"])
dat2 = Table(dom, self.data)
# c1 c2 d2 d3 cl1
self.assertEqual(dat2[0], [1, -2, 0, 0, "a"])
self.assertEqual(dat2[1], [0, 0, 1, 0.5, "b"])
self.assertEqual(dat2[2], [2, 2, 1, 1, "c"])
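# Added entry point (an assumption; the original file may rely on an external
# test runner instead) so the tests can also be run directly:
if __name__ == "__main__":
    unittest.main()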
|
|
'''
------------------------------------------------------------------------
Last updated 7/13/2015
Functions for generating omega, the T x S array which describes the
demographics of the population
This py-file calls the following other file(s):
utils.py
data\demographic\demographic_data.csv
data\demographic\mortality_rates.csv
This py-file creates the following other file(s):
    (make sure that an OUTPUT/Demographics folder exists)
        OUTPUT/Demographics/fert_rates.png
        OUTPUT/Demographics/mort_rates.png
        OUTPUT/Demographics/survival_rate.png
        OUTPUT/Demographics/cum_mort_rate.png
        OUTPUT/Demographics/imm_rates.png
        OUTPUT/Demographics/Population.png
        OUTPUT/Demographics/Population_growthrate.png
        OUTPUT/Demographics/omega_init.png
        OUTPUT/Demographics/omega_ss.png
------------------------------------------------------------------------
'''
'''
------------------------------------------------------------------------
Packages
------------------------------------------------------------------------
'''
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import numpy.polynomial.polynomial as poly
import scipy.optimize as opt
import utils
cur_path = os.path.split(os.path.abspath(__file__))[0]
DEMO_DIR = os.path.join(cur_path, "data", "demographic")
pd.options.mode.chained_assignment = None
'''
------------------------------------------------------------------------
Import data sets
------------------------------------------------------------------------
Population data:
Obtained from:
Annual Estimates of the Resident Population by Single Year of
Age and Sex: April 1, 2010 to July 1, 2013
(Both sexes)
National Characteristics, Vintage 2013
US Census Bureau
http://www.census.gov/popest/data/national/asrh/2013/index.html
Mortality rates data:
Obtained from:
Male and Female death probabilities
Actuarial Life table, 2010
Social Security Administration
http://www.ssa.gov/oact/STATS/table4c6.html
Fertility rates data:
Obtained from:
Births and birth rates, by age of mother, US, 2010
National Vital Statistics Reports, CDC
http://www.cdc.gov/nchs/data/nvsr/nvsr60/nvsr60_02.pdf
Since rates are per 1000 women, the data is divided by 1000
------------------------------------------------------------------------
'''
# Population data
demo_file = utils.read_file(cur_path, "data/demographic/demographic_data.csv")
data = pd.read_table(demo_file, sep=',', header=0)
data = data.set_index('Age')
# Remove commas in the data
for year in ['2010', '2011', '2012', '2013']:
    for index, value in enumerate(data[year]):
        data[year][index] = int(value.replace(',', ''))
# Create a copy of the data to be used elsewhere, without changing the
# main data
data_raw = data.copy(deep=True)
# Mortality rates data
#mort_data = pd.read_table(os.path.join(DEMO_DIR, 'mortality_rates.csv'), sep=',')
mort_file = utils.read_file(cur_path, "data/demographic/mortality_rates.csv")
mort_data = pd.read_table(mort_file, sep=',')
# Remove commas in the data
for index, value in enumerate(mort_data['male_weight']):
mort_data['male_weight'][index] = float(value.replace(',', ''))
for index, value in enumerate(mort_data['female_weight']):
mort_data['female_weight'][index] = float(value.replace(',', ''))
# Average male and female death rates
mort_data['mort_rate'] = (
(np.array(mort_data.male_death.values).astype(float) * np.array(
mort_data.male_weight.values).astype(float)) + (np.array(
mort_data.female_death.values).astype(float) * np.array(
mort_data.female_weight.values).astype(float))) / (
np.array(mort_data.male_weight.values).astype(float) + np.array(
mort_data.female_weight.values).astype(float))
mort_data = mort_data[mort_data.mort_rate.values < 1]
del mort_data['male_death'], mort_data[
'female_death'], mort_data['male_weight'], mort_data[
'female_weight'], mort_data['male_expectancy'], mort_data[
'female_expectancy']
# As the data gives the probability of death, one minus the rate will
# give the survival rate
mort_data['surv_rate'] = 1 - mort_data.mort_rate
# Create an array of death rates of children
# Fertility rates data
fert_data = np.array(
[.4, 34.3, 17.3, 58.3, 90.0, 108.3, 96.6, 45.9, 10.2, .7]) / 1000
# Fertility rates are given in age groups of 5 years, so the following
# are the midpoints of those groups
age_midpoint = np.array([12, 17, 16, 18.5, 22, 27, 32, 37, 42, 49.5])
'''
------------------------------------------------------------------------
Define functions
------------------------------------------------------------------------
'''
def fit_exp_right(params, point1, point2):
# Fit exponentials to two points for right tail of distributions
a, b = params
x1, y1 = point1
x2, y2 = point2
error1 = a*b**(-x1) - y1
error2 = a*b**(-x2) - y2
return [error1, error2]
def fit_exp_left(params, point1, point2):
# Fit exponentials to two points for left tail of distributions
a, b = params
x1, y1 = point1
x2, y2 = point2
error1 = a*b**(x1) - y1
error2 = a*b**(x2) - y2
return [error1, error2]
def exp_int(points, a, b):
top = a * ((1.0/(b**40)) - b**(-points))
bottom = np.log(b)
return top / bottom
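# Note on exp_int (added for clarity): the antiderivative of the fitted tail
# density a * b**(-x) is -a * b**(-x) / log(b), so the integral from 40 up to
# each entry of `points` is a * (b**(-40) - b**(-points)) / log(b), which is
# exactly the `top / bottom` expression returned above.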
def integrate(func, points):
params_guess = [1, 1]
a, b = opt.fsolve(fit_exp_right, params_guess, args=(
[40, poly.polyval(40, func)], [49.5, .0007]))
func_int = poly.polyint(func)
integral = np.empty(points.shape)
integral[points <= 40] = poly.polyval(points[points <= 40], func_int)
integral[points > 40] = poly.polyval(40, func_int) + exp_int(
points[points > 40], a, b)
return np.diff(integral)
'''
------------------------------------------------------------------------
Survival Rates
------------------------------------------------------------------------
'''
def get_survival(S, starting_age, ending_age, E):
'''
Parameters:
S - Number of age cohorts (scalar)
starting_age = initial age of cohorts (scalar)
ending_age = ending age of cohorts (scalar)
E = number of children (scalar)
Returns:
surv_array - S x 1 array of survival rates for each age cohort
children_rate - starting_age x 1 array of survival
rates for children
'''
mort_rate = np.array(mort_data.mort_rate)
mort_poly = poly.polyfit(np.arange(mort_rate.shape[0]), mort_rate, deg=18)
mort_int = poly.polyint(mort_poly)
child_rate = poly.polyval(np.linspace(0, starting_age, E+1), mort_int)
child_rate = np.diff(child_rate)
mort_rate = poly.polyval(
np.linspace(starting_age, ending_age, S+1), mort_int)
mort_rate = np.diff(mort_rate)
child_rate[child_rate < 0] = 0.0
mort_rate[mort_rate < 0] = 0.0
return 1.0 - mort_rate, 1.0 - child_rate
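# Example call (a sketch with hypothetical sizes, not values prescribed by the
# model): with 80 model periods covering ages 20 to 100 and 20 child ages,
#     surv_array, children_rate = get_survival(80, 20, 100, 20)
# surv_array then holds one survival rate per adult cohort and children_rate
# one survival rate per child age.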
'''
------------------------------------------------------------------------
Immigration Rates
------------------------------------------------------------------------
'''
pop_2010, pop_2011, pop_2012, pop_2013 = np.array(
data_raw['2010'], dtype='f'), np.array(
data_raw['2011'], dtype='f'), np.array(
data_raw['2012'], dtype='f'), np.array(
data_raw['2013'], dtype='f')
def get_immigration1(S, starting_age, ending_age, pop_2010, pop_2011, E):
'''
Parameters:
S - Number of age cohorts
starting_age - initial age of cohorts
        pop_2010 - initial population
        pop_2011 - population one year later
Returns:
im_array - S+E x 1 array of immigration rates for each
age cohort
'''
# Get survival rates for the S age groups
surv_array, children_rate = get_survival(
ending_age-starting_age, starting_age, ending_age, starting_age)
surv_array = np.array(list(children_rate) + list(surv_array))
# Only keep track of individuals in 2010 that don't die
pop_2010 = pop_2010[:ending_age]
# In 2011, individuals will have aged one year
pop_2011 = pop_2011[1:ending_age+1]
    # A cohort's population next year equals this year's population times
    # (survival rate + immigration rate), so the immigration rate is the
    # observed growth factor (1 + percent change) minus the survival rate.
    perc_change = ((pop_2011 - pop_2010) / pop_2010)
    im_array = perc_change - (surv_array - 1)
return im_array
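# Worked example (illustrative numbers only): if a cohort of 1000 people in
# 2010 appears as 1005 people one year older in 2011 while its survival rate
# is 0.99, then perc_change = 0.005 and the implied immigration rate is
# 0.005 - (0.99 - 1) = 0.015, since 1000 * (0.99 + 0.015) = 1005.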
def get_immigration2(S, starting_age, ending_age, E):
'''
Parameters:
S - Number of age cohorts
starting age - initial age of cohorts
Returns:
im_array - S x 1 array of immigration rates for each
age cohort
child_imm_rate - starting_age x 1 array of immigration
rates for children
'''
imm_rate_condensed1 = get_immigration1(
S, starting_age, ending_age, pop_2010, pop_2011, E)
imm_rate_condensed2 = get_immigration1(
S, starting_age, ending_age, pop_2011, pop_2012, E)
imm_rate_condensed3 = get_immigration1(
S, starting_age, ending_age, pop_2012, pop_2013, E)
im_array = (
imm_rate_condensed1 + imm_rate_condensed2 + imm_rate_condensed3) / 3.0
poly_imm = poly.polyfit(np.linspace(
1, ending_age, ending_age-1), im_array[:-1], deg=18)
poly_imm_int = poly.polyint(poly_imm)
child_imm_rate = poly.polyval(np.linspace(
0, starting_age, E+1), poly_imm_int)
imm_rate = poly.polyval(np.linspace(
starting_age, ending_age, S+1), poly_imm_int)
child_imm_rate = np.diff(child_imm_rate)
imm_rate = np.diff(imm_rate)
imm_rate[-1] = 0.0
return imm_rate, child_imm_rate
'''
------------------------------------------------------------------------
Fertility Rates
------------------------------------------------------------------------
'''
def get_fert(S, starting_age, ending_age, E):
'''
Parameters:
S - Number of age cohorts
starting age - initial age of cohorts
Returns:
fert_rate - Sx1 array of fertility rates for each
age cohort
children_fertrate - starting_age x 1 array of zeros, to be
used in get_omega()
'''
# Fit a polynomial to the fertility rates
poly_fert = poly.polyfit(age_midpoint, fert_data, deg=4)
fert_rate = integrate(poly_fert, np.linspace(
starting_age, ending_age, S+1))
fert_rate /= 2.0
children_fertrate_int = poly.polyint(poly_fert)
children_fertrate_int = poly.polyval(np.linspace(
0, starting_age, E + 1), children_fertrate_int)
children_fertrate = np.diff(children_fertrate_int)
children_fertrate /= 2.0
children_fertrate[children_fertrate < 0] = 0
children_fertrate[:int(10*S/float(ending_age-starting_age))] = 0
return fert_rate, children_fertrate
'''
------------------------------------------------------------------------
Generate graphs of mortality, fertility, and immigration rates
------------------------------------------------------------------------
'''
def rate_graphs(S, starting_age, ending_age, imm, fert, surv, child_imm, child_fert, child_mort):
domain = np.arange(child_fert.shape[0] + S) + 1
mort = mort_data.mort_rate
domain2 = np.arange(mort.shape[0]) + 1
domain4 = np.arange(child_imm.shape[0] + imm.shape[0]) + 1
# Graph of fertility rates
plt.figure()
plt.plot(
domain, list(child_fert)+list(fert), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'fertility $f_s$')
plt.savefig('OUTPUT/Demographics/fert_rates')
# Graph of mortality rates
plt.figure()
plt.plot(domain2[:ending_age-1], (1-np.array(list(child_mort)+list(surv)))[:-1], color='blue', linewidth=2)
plt.plot(domain2[ending_age:], mort[
ending_age:], color='blue', linestyle='--', linewidth=2)
plt.axvline(x=ending_age, color='red', linestyle='-', linewidth=1)
plt.xlabel(r'age $s$')
plt.ylabel(r'mortality $\rho_s$')
plt.savefig('OUTPUT/Demographics/mort_rates')
cum_surv_arr = np.cumprod(surv)
domain3 = np.arange(surv.shape[0]) + 1
# Graph of cumulative mortality rates
plt.figure()
plt.plot(domain3, cum_surv_arr)
plt.xlabel(r'age $s$')
plt.ylabel(r'survival rate $1-\rho_s$')
plt.savefig('OUTPUT/Demographics/survival_rate')
cum_mort_rate = 1-cum_surv_arr
plt.figure()
plt.plot(domain3, cum_mort_rate)
plt.xlabel(r'age $s$')
plt.ylabel(r'cumulative mortality rate')
plt.savefig('OUTPUT/Demographics/cum_mort_rate')
# Graph of immigration rates
plt.figure()
plt.plot(domain4, list(
child_imm)+list(imm), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'immigration $i_s$')
plt.savefig('OUTPUT/Demographics/imm_rates')
'''
------------------------------------------------------------------------
Generate graphs of Population
------------------------------------------------------------------------
'''
def pop_graphs(S, T, starting_age, ending_age, children, g_n, omega):
N = omega[T].sum() + children[T].sum()
x = children.sum(1) + omega.sum(1)
x2 = 100 * np.diff(x)/x[:-1]
plt.figure()
plt.plot(np.arange(T+S)+1, x, 'b', linewidth=2)
plt.title('Population Size (as a percent of the initial population)')
plt.xlabel(r'Time $t$')
# plt.ylabel('Population size, as a percent of initial population')
plt.savefig('OUTPUT/Demographics/Population')
plt.figure()
plt.plot(np.arange(T+S-1)+1, x2, 'b', linewidth=2)
plt.axhline(y=100 * g_n, color='r', linestyle='--', label=r'$\bar{g}_n$')
plt.legend(loc=0)
plt.xlabel(r'Time $t$')
plt.ylabel(r'Population growth rate $g_n$')
# plt.title('Population Growth rate over time')
plt.savefig('OUTPUT/Demographics/Population_growthrate')
plt.figure()
plt.plot(np.arange(S+int(starting_age * S / (
ending_age-starting_age)))+1, list(
children[0, :]) + list(
omega[0, :]), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'$\omega_{s,1}$')
plt.savefig('OUTPUT/Demographics/omega_init')
plt.figure()
plt.plot(np.arange(S+int(starting_age * S / (
ending_age-starting_age)))+1, list(
children[T, :]/N) + list(
omega[T, :]/N), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'$\overline{\omega}$')
plt.savefig('OUTPUT/Demographics/omega_ss')
'''
------------------------------------------------------------------------
Generate Demographics
------------------------------------------------------------------------
'''
def get_omega(S, T, starting_age, ending_age, E, flag_graphs):
'''
Inputs:
S - Number of age cohorts (scalar)
T - number of time periods in TPI (scalar)
starting_age - initial age of cohorts (scalar)
ending_age = ending age of cohorts (scalar)
E = number of children (scalar)
flag_graphs = graph variables or not (bool)
Outputs:
omega_big = array of all population weights over time ((T+S)x1 array)
g_n_SS = steady state growth rate (scalar)
omega_SS = steady state population weights (Sx1 array)
surv_array = survival rates (Sx1 array)
rho = mortality rates (Sx1 array)
g_n_vec = population growth rate over time ((T+S)x1 array)
'''
data1 = data
pop_data = np.array(data1['2010'])
poly_pop = poly.polyfit(np.linspace(
0, pop_data.shape[0]-1, pop_data.shape[0]), pop_data, deg=11)
poly_int_pop = poly.polyint(poly_pop)
pop_int = poly.polyval(np.linspace(
starting_age, ending_age, S+1), poly_int_pop)
new_omega = pop_int[1:]-pop_int[:-1]
surv_array, children_rate = get_survival(S, starting_age, ending_age, E)
surv_array[-1] = 0.0
imm_array, children_im = get_immigration2(S, starting_age, ending_age, E)
imm_array *= 0.0
fert_rate, children_fertrate = get_fert(S, starting_age, ending_age, E)
cum_surv_rate = np.cumprod(surv_array)
if flag_graphs:
rate_graphs(S, starting_age, ending_age, imm_array, fert_rate, surv_array, children_im, children_fertrate, children_rate)
children_int = poly.polyval(np.linspace(0, starting_age, E + 1), poly_int_pop)
sum2010 = pop_int[-1] - children_int[0]
new_omega /= sum2010
children = np.diff(children_int)
children /= sum2010
children = np.tile(children.reshape(1, E), (T + S, 1))
omega_big = np.tile(new_omega.reshape(1, S), (T + S, 1))
# Generate the time path for each age group
for t in xrange(1, T + S):
# Children are born and then have to wait 20 years to enter the model
omega_big[t, 0] = children[t-1, -1] * (children_rate[-1] + children_im[-1])
omega_big[t, 1:] = omega_big[t-1, :-1] * (surv_array[:-1] + imm_array[:-1])
children[t, 1:] = children[t-1, :-1] * (children_rate[:-1] + children_im[:-1])
children[t, 0] = (omega_big[t-1, :] * fert_rate).sum(0) + (children[t-1] * children_fertrate).sum(0)
OMEGA = np.zeros(((S + E), (S + E)))
OMEGA[0, :] = np.array(list(children_fertrate) + list(fert_rate))
OMEGA += np.diag(np.array(list(children_rate) + list(surv_array[:-1])) + np.array(list(children_im) + list(imm_array[:-1])), -1)
eigvalues, eigvectors = np.linalg.eig(OMEGA)
mask = eigvalues.real != 0
eigvalues = eigvalues[mask]
mask2 = eigvalues.imag == 0
eigvalues = eigvalues[mask2].real
g_n_SS = eigvalues - 1
eigvectors = np.abs(eigvectors.T)
eigvectors = eigvectors[mask]
omega_SS = eigvectors[mask2].real
if eigvalues.shape[0] != 1:
ind = ((abs(omega_SS.T/omega_SS.T.sum(0) - np.array(list(children[-1, :]) + list(omega_big[-1, :])).reshape(S+E, 1)).sum(0))).argmin()
omega_SS = omega_SS[ind]
g_n_SS = [g_n_SS[ind]]
omega_SS = omega_SS[E:]
omega_SS /= omega_SS.sum()
# Creating the different ability level bins
if flag_graphs:
pop_graphs(S, T, starting_age, ending_age, children, g_n_SS[0], omega_big)
N_vector = omega_big.sum(1)
g_n_vec = N_vector[1:] / N_vector[:-1] -1.0
g_n_vec = np.append(g_n_vec, g_n_SS[0])
rho = 1.0 - surv_array
return omega_big, g_n_SS[0], omega_SS, surv_array, rho, g_n_vec
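# Example call (a sketch; the parameter values below are hypothetical and
# flag_graphs is off so no OUTPUT folder is needed):
#     omega_big, g_n_SS, omega_SS, surv_array, rho, g_n_vec = get_omega(
#         S=80, T=160, starting_age=20, ending_age=100, E=20, flag_graphs=False)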
|
|
# coding: utf-8
#
# Esri start of added variables
g_ESRI_variable_1 = r'%scratchGDB%\tempSortedPoints'
# Esri end of added variables
#
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# ==================================================
# NumberFeatures.py
# --------------------------------------------------
# Built on ArcGIS
# ==================================================
#
# This tool will number features within a point feature class spatially from left to right,
# top to bottom for a selected area.
#
# ==================================================
# HISTORY:
#
# 8/25/2015 - mf - Needed to update script for non-ArcMap/Pro testing environment
#
# ==================================================
import os, sys, math, traceback
import arcpy
from arcpy import env
from arcpy import sa
import Utilities
# Read in the Parameters
areaToNumber = arcpy.GetParameterAsText(0)
pointFeatures = arcpy.GetParameterAsText(1)
numberingField = arcpy.GetParameterAsText(2)
outputFeatureClass = arcpy.GetParameterAsText(3)
DEBUG = True
appEnvironment = None
mxd, df, aprx, mp, mapList = None, None, None, None, None
def labelFeatures(layer, field):
''' set up labeling for layer '''
if appEnvironment == "ARCGIS_PRO":
if layer.supports("SHOWLABELS"):
for lblclass in layer.listLabelClasses():
lblclass.visible = True
lblclass.expression = "$feature.Number"
layer.showLabels = True
elif appEnvironment == "ARCMAP":
if layer.supports("LABELCLASSES"):
for lblclass in layer.labelClasses:
lblclass.showClassLabels = True
lblclass.expression = " [" + str(field) + "]"
layer.showLabels = True
arcpy.RefreshActiveView()
else:
pass # if returns "OTHER"
def findLayerByName(layerName):
#UPDATE
if appEnvironment == "ARCGIS_PRO":
#if gisVersion == "1.0": #Pro:
if DEBUG == True:
arcpy.AddMessage("Pro labeling for " + layerName + "...")
for layer in mapList.listLayers():
if layer.name == layerName:
arcpy.AddMessage("Found matching layer [" + layer.name + "]")
return layer
else:
arcpy.AddMessage("Incorrect layer: [" + layer.name + "]")
#else:
elif appEnvironment == "ARCMAP":
if DEBUG == True: arcpy.AddMessage("Map labeling for " + layerName + "...")
for layer in arcpy.mapping.ListLayers(mxd):
if layer.name == layerName:
arcpy.AddMessage("Found matching layer [" + layer.name + "]")
return layer
else:
arcpy.AddMessage("Incorrect layer: [" + layer.name + "]")
else:
arcpy.AddMessage("Non-map environment, no layers to find...")
def GetApplication():
'''Return app environment as ARCMAP, ARCGIS_PRO, OTHER'''
try:
from arcpy import mp
return "ARCGIS_PRO"
except ImportError:
try:
from arcpy import mapping
mxd = arcpy.mapping.MapDocument("CURRENT")
return "ARCMAP"
except:
return "OTHER"
def main():
''' main '''
#UPDATE
# Create a feature layer from the input point features if it is not one already
#df = arcpy.mapping.ListDataFrames(mxd)[0]
pointFeatureName = os.path.basename(pointFeatures)
layerExists = False
try:
# Check that area to number is a polygon
descArea = arcpy.Describe(areaToNumber)
areaGeom = descArea.shapeType
arcpy.AddMessage("Shape type: " + str(areaGeom))
if (descArea.shapeType != "Polygon"):
raise Exception("ERROR: The area to number must be a polygon.")
gisVersion = arcpy.GetInstallInfo()["Version"]
global appEnvironment
appEnvironment = Utilities.GetApplication()
if DEBUG == True: arcpy.AddMessage("App environment: " + appEnvironment)
global mxd
global df
global aprx
global mp
global mapList
# mxd, df, aprx, mp = None, None, None, None
#if gisVersion == "1.0": #Pro:
if appEnvironment == "ARCGIS_PRO":
from arcpy import mp
aprx = arcpy.mp.ArcGISProject("CURRENT")
mapList = aprx.listMaps()[0]
for lyr in mapList.listLayers():
if lyr.name == pointFeatureName:
layerExists = True
#else:
if appEnvironment == "ARCMAP":
from arcpy import mapping
mxd = arcpy.mapping.MapDocument('CURRENT')
df = arcpy.mapping.ListDataFrames(mxd)[0]
for lyr in arcpy.mapping.ListLayers(mxd):
if lyr.name == pointFeatureName:
layerExists = True
if layerExists == False:
arcpy.MakeFeatureLayer_management(pointFeatures, pointFeatureName)
else:
pointFeatureName = pointFeatures
# Select all the points that are inside of area
arcpy.AddMessage("Selecting points from (" + str(os.path.basename(pointFeatureName)) +\
") inside of the area (" + str(os.path.basename(areaToNumber)) + ")")
selectionLayer = arcpy.SelectLayerByLocation_management(pointFeatureName, "INTERSECT",
areaToNumber, "#", "NEW_SELECTION")
if DEBUG == True:
arcpy.AddMessage("Selected " + str(arcpy.GetCount_management(pointFeatureName).getOutput(0)) + " points")
# If no output FC is specified, then set it a temporary one, as this will be copied to the input and then deleted.
# Sort layer by upper right across and then down spatially,
overwriteFC = False
global outputFeatureClass
if outputFeatureClass == "":
outputFeatureClass = g_ESRI_variable_1
            overwriteFC = True
arcpy.AddMessage("Sorting the selected points geographically, right to left, top to bottom")
arcpy.Sort_management(selectionLayer, outputFeatureClass, [["Shape", "ASCENDING"]])
# Number the fields
arcpy.AddMessage("Numbering the fields")
i = 1
cursor = arcpy.UpdateCursor(outputFeatureClass)
for row in cursor:
row.setValue(numberingField, i)
cursor.updateRow(row)
i += 1
# Clear the selection
arcpy.AddMessage("Clearing the selection")
arcpy.SelectLayerByAttribute_management(pointFeatureName, "CLEAR_SELECTION")
# Overwrite the Input Point Features, and then delete the temporary output feature class
targetLayerName = ""
if (overwriteFC):
arcpy.AddMessage("Copying the features to the input, and then deleting the temporary feature class")
desc = arcpy.Describe(pointFeatures)
if hasattr(desc, "layer"):
overwriteFC = desc.layer.catalogPath
else:
overwriteFC = desc.catalogPath
fields = (numberingField, "SHAPE@")
overwriteCursor = arcpy.da.UpdateCursor(overwriteFC, fields)
for overwriteRow in overwriteCursor:
sortedPointsCursor = arcpy.da.SearchCursor(outputFeatureClass, fields)
for sortedRow in sortedPointsCursor:
if sortedRow[1].equals(overwriteRow[1]):
overwriteRow[0] = sortedRow[0]
overwriteCursor.updateRow(overwriteRow)
arcpy.Delete_management(outputFeatureClass)
#UPDATE
#if layerExists == False:
#layerToAdd = arcpy.mapping.Layer(pointFeatureName)
#arcpy.mapping.AddLayer(df, layerToAdd, "AUTO_ARRANGE")
targetLayerName = pointFeatureName
else:
#UPDATE
#layerToAdd = arcpy.mapping.Layer(outputFeatureClass)
#arcpy.mapping.AddLayer(df, layerToAdd, "AUTO_ARRANGE")
targetLayerName = os.path.basename(outputFeatureClass)
# Get and label the output feature
if appEnvironment == "ARCGIS_PRO":
#params = arcpy.GetParameterInfo()
##get the symbology from the NumberedStructures.lyr
#scriptPath = sys.path[0]
#arcpy.AddMessage(scriptPath)
#layerFilePath = os.path.join(scriptPath,r"commondata\userdata\NumberedStructures.lyrx")
#params[3].symbology = layerFilePath
#arcpy.AddMessage("Applying Symbology from {0}".format(layerFilePath))
arcpy.AddMessage("Applying symbology on the script tool based on best practice")
elif appEnvironment == "ARCMAP":
#arcpy.AddMessage("Adding features to map (" + str(targetLayerName) + ")...")
#arcpy.MakeFeatureLayer_management(outputFeatureClass, targetLayerName)
# create a layer object
#layer = arcpy.mapping.Layer(targetLayerName)
# get the symbology from the NumberedStructures.lyr
#layerFilePath = os.path.join(os.getcwd(),r"data\Layers\NumberedStructures.lyr")
#layerFilePath = os.path.join(os.path.dirname(os.path.dirname(__file__)),r"layers\NumberedStructures.lyr")
# apply the symbology to the layer
#arcpy.ApplySymbologyFromLayer_management(layer, layerFilePath)
# add layer to map
#arcpy.mapping.AddLayer(df, layer, "AUTO_ARRANGE")
# find the target layer in the map
#mapLyr = arcpy.mapping.ListLayers(mxd, targetLayerName)[0]
#arcpy.AddMessage("Labeling output features (" + str(targetLayerName) + ")...")
# Work around needed as ApplySymbologyFromLayer_management does not honour labels
#labelLyr = arcpy.mapping.Layer(layerFilePath)
# copy the label info from the source to the map layer
#mapLyr.labelClasses = labelLyr.labelClasses
# turn labels on
#mapLyr.showLabels = True
arcpy.AddMessage("Applying symbology on the script tool based on best practice")
else:
arcpy.AddMessage("Non-map application, skipping labeling...")
arcpy.SetParameter(3, outputFeatureClass)
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
print(msgs)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
print(pymsg + "\n")
print(msgs)
# MAIN =============================================
if __name__ == "__main__":
main()
|
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import re
from dateutil import parser as dateutil_parser
from oslo_utils import timeutils
from sqlalchemy.dialects import sqlite
from sqlalchemy import func
from sqlalchemy import MetaData
from sqlalchemy import select
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.tests.functional import integrated_helpers
class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
"""Tests DB API for archiving (soft) deleted records"""
def setUp(self):
super(TestDatabaseArchive, self).setUp()
# TODO(mriedem): pull this out so we can re-use it in
# test_archive_deleted_rows_fk_constraint
# SQLite doesn't enforce foreign key constraints without a pragma.
engine = sqlalchemy_api.get_engine()
dialect = engine.url.get_dialect()
if dialect == sqlite.dialect:
# We're seeing issues with foreign key support in SQLite 3.6.20
# SQLAlchemy doesn't support it at all with < SQLite 3.6.19
# It works fine in SQLite 3.7.
# So return early to skip this test if running SQLite < 3.7
import sqlite3
tup = sqlite3.sqlite_version_info
if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
self.skipTest(
'sqlite version too old for reliable SQLA foreign_keys')
engine.connect().execute("PRAGMA foreign_keys = ON")
def test_archive_deleted_rows(self):
# Boots a server, deletes it, and then tries to archive it.
server = self._create_server()
server_id = server['id']
# Assert that there are instance_actions. instance_actions are
# interesting since we don't soft delete them but they have a foreign
# key back to the instances table.
actions = self.api.get_instance_actions(server_id)
self.assertTrue(len(actions),
'No instance actions for server: %s' % server_id)
self._delete_server(server)
# Verify we have the soft deleted instance in the database.
admin_context = context.get_admin_context(read_deleted='yes')
# This will raise InstanceNotFound if it's not found.
instance = db.instance_get_by_uuid(admin_context, server_id)
# Make sure it's soft deleted.
self.assertNotEqual(0, instance.deleted)
# Verify we have some system_metadata since we'll check that later.
self.assertTrue(len(instance.system_metadata),
'No system_metadata for instance: %s' % server_id)
# Now try and archive the soft deleted records.
results, deleted_instance_uuids, archived = \
db.archive_deleted_rows(max_rows=100)
# verify system_metadata was dropped
self.assertIn('instance_system_metadata', results)
self.assertEqual(len(instance.system_metadata),
results['instance_system_metadata'])
# Verify that instances rows are dropped
self.assertIn('instances', results)
# Verify that instance_actions and actions_event are dropped
# by the archive
self.assertIn('instance_actions', results)
self.assertIn('instance_actions_events', results)
self.assertEqual(sum(results.values()), archived)
def test_archive_deleted_rows_with_undeleted_residue(self):
# Boots a server, deletes it, and then tries to archive it.
server = self._create_server()
server_id = server['id']
# Assert that there are instance_actions. instance_actions are
# interesting since we don't soft delete them but they have a foreign
# key back to the instances table.
actions = self.api.get_instance_actions(server_id)
self.assertTrue(len(actions),
'No instance actions for server: %s' % server_id)
self._delete_server(server)
# Verify we have the soft deleted instance in the database.
admin_context = context.get_admin_context(read_deleted='yes')
# This will raise InstanceNotFound if it's not found.
instance = db.instance_get_by_uuid(admin_context, server_id)
# Make sure it's soft deleted.
self.assertNotEqual(0, instance.deleted)
# Undelete the instance_extra record to make sure we delete it anyway
extra = db.instance_extra_get_by_instance_uuid(admin_context,
instance.uuid)
self.assertNotEqual(0, extra.deleted)
db.instance_extra_update_by_uuid(admin_context, instance.uuid,
{'deleted': 0})
extra = db.instance_extra_get_by_instance_uuid(admin_context,
instance.uuid)
self.assertEqual(0, extra.deleted)
# Verify we have some system_metadata since we'll check that later.
self.assertTrue(len(instance.system_metadata),
'No system_metadata for instance: %s' % server_id)
# Create a pci_devices record to simulate an instance that had a PCI
# device allocated at the time it was deleted. There is a window of
# time between deletion of the instance record and freeing of the PCI
# device in nova-compute's _complete_deletion method during RT update.
db.pci_device_update(admin_context, 1, 'fake-address',
{'compute_node_id': 1,
'address': 'fake-address',
'vendor_id': 'fake',
'product_id': 'fake',
'dev_type': 'fake',
'label': 'fake',
'status': 'allocated',
'instance_uuid': instance.uuid})
# Now try and archive the soft deleted records.
results, deleted_instance_uuids, archived = \
db.archive_deleted_rows(max_rows=100)
# verify system_metadata was dropped
self.assertIn('instance_system_metadata', results)
self.assertEqual(len(instance.system_metadata),
results['instance_system_metadata'])
# Verify that instances rows are dropped
self.assertIn('instances', results)
# Verify that instance_actions and actions_event are dropped
# by the archive
self.assertIn('instance_actions', results)
self.assertIn('instance_actions_events', results)
self.assertEqual(sum(results.values()), archived)
# Verify that the pci_devices record has not been dropped
self.assertNotIn('pci_devices', results)
def _get_table_counts(self):
engine = sqlalchemy_api.get_engine()
conn = engine.connect()
meta = MetaData(engine)
meta.reflect()
shadow_tables = sqlalchemy_api._purgeable_tables(meta)
results = {}
for table in shadow_tables:
r = conn.execute(
select([func.count()]).select_from(table)).fetchone()
results[table.name] = r[0]
return results
def test_archive_then_purge_all(self):
server = self._create_server()
server_id = server['id']
self._delete_server(server)
results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
lines = []
def status(msg):
lines.append(msg)
admin_context = context.get_admin_context()
deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
None, status_fn=status)
self.assertNotEqual(0, deleted)
self.assertNotEqual(0, len(lines))
self.assertEqual(sum(results.values()), archived)
for line in lines:
self.assertIsNotNone(re.match(r'Deleted [1-9][0-9]* rows from .*',
line))
results = self._get_table_counts()
# No table should have any rows
self.assertFalse(any(results.values()))
def test_archive_then_purge_by_date(self):
server = self._create_server()
server_id = server['id']
self._delete_server(server)
results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
self.assertEqual(sum(results.values()), archived)
pre_purge_results = self._get_table_counts()
past = timeutils.utcnow() - datetime.timedelta(hours=1)
admin_context = context.get_admin_context()
deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
past)
# Make sure we didn't delete anything if the marker is before
# we started
self.assertEqual(0, deleted)
results = self._get_table_counts()
# Nothing should be changed if we didn't purge anything
self.assertEqual(pre_purge_results, results)
future = timeutils.utcnow() + datetime.timedelta(hours=1)
deleted = sqlalchemy_api.purge_shadow_tables(admin_context, future)
# Make sure we deleted things when the marker is after
# we started
self.assertNotEqual(0, deleted)
results = self._get_table_counts()
# There should be no rows in any table if we purged everything
self.assertFalse(any(results.values()))
def test_purge_with_real_date(self):
"""Make sure the result of dateutil's parser works with the
query we're making to sqlalchemy.
"""
server = self._create_server()
server_id = server['id']
self._delete_server(server)
results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
date = dateutil_parser.parse('oct 21 2015', fuzzy=True)
admin_context = context.get_admin_context()
deleted = sqlalchemy_api.purge_shadow_tables(admin_context, date)
self.assertEqual(0, deleted)
self.assertEqual(sum(results.values()), archived)
|
|
# Simplex
"""Provides classes to draw different types of two dimensional simplex
diagrams. A two dimensional simplex diagram is an equal sided triangle,
where each point inside the triangle represents a ratio of three
variables. This could for example be the population ratio of three
species in biology.
"""
import math, random
try:
import Graph, Gfx
except ImportError:
from . import Graph, Gfx
try:
from Compatibility import *
except ImportError:
from . import Compatibility
globals().update(Compatibility.__dict__)
# population ratios should add up to 1.0. This following constant
# defines the maximum allowed error for this sum.
ERROR_TOLERANCE = 1.0 / 10000.0
ERROR_CHECKING = False
########################################################################
#
# class Plotter
#
########################################################################
SIMPLEX_H = math.sqrt(0.75)
MAGIC_TITLESIZE_FACTOR = 3
def drawArrow(gfx, x1, y1, x2, y2):
if x1 == x2 and y1 == y2:
gfx.drawPoint(x1, y1)
#gfx.drawLine(x1-1, y1-1, x1+1, y1+1)
#gfx.drawLine(x1-1, y1+1, x1+1, y1-1)
else:
gfx.drawLine(x1, y1, x2, y2)
dx = float(x2-x1); dy = float(y2-y1)
l = math.sqrt(dx*dx + dy*dy)
if l > 0:
dx /= l; dy /= l
l /= 3.
px = x2 - dx * l; py = y2 - dy * l
ax = int(px - dy * l + 0.5); ay = int(py + dx * l + 0.5)
bx = int(px + dy * l + 0.5); by = int(py - dx * l + 0.5)
gfx.drawLine(x2, y2, ax, ay)
gfx.drawLine(x2, y2, bx, by)
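# Geometry note (added): the arrow head is anchored at the point (px, py) one
# third of the shaft length back from the tip (x2, y2); offsetting that point
# by the perpendicular vectors (-dy, dx) and (dy, -dx), each scaled by the same
# third of the length, yields the two barbs drawn above.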
class Plotter(Graph.HardFramedScreen):
"""Plotter for simple triangle diagrams.
Triangle diagrams are commonly used in game theory to visualize
the evolution of dynamical systems of three types of players. The
edges of the triangle represent system states where the whole
population consists only of players of one single type, whereas
points in the interior of the triangle represent mixed populations
according to the given player ratios.
"""
def __init__(self, gfx, title="Simplex Diagram",
p1="P1", p2="P2", p3="P3", styleFlags = 0,
titlePen = Gfx.BLACK_PEN, labelPen = Gfx.BLACK_PEN,
simplexPen=Gfx.BLACK_PEN, backgroundPen=Gfx.WHITE_PEN,
section=Graph.REGION_FULLSCREEN):
top = self._calcTitleSize(gfx, title, titlePen)
Graph.HardFramedScreen.__init__(self, gfx, -0.1, -0.1,
1.1, SIMPLEX_H + 0.1, section, top, 0, 0, 0, keepAspect=True)
self.title = title
self.p1, self.p2, self.p3 = p1, p2, p3
self.styleFlags = styleFlags
self.titlePen = titlePen; self.labelPen = labelPen
self.simplexPen = simplexPen; self.backgroundPen = backgroundPen
self.clear()
def _calcTitleSize(self, gfx, title, titlePen):
"""Calculates the size of the title frame, depending on the
selected font."""
if title == "": return 0
gfx.applyPen(titlePen, Gfx.MASK_FONT)
th = gfx.getTextSize("0g")[1]
return int(th * MAGIC_TITLESIZE_FACTOR + 0.5)
def _clearLabels(self):
"""Clears the labels."""
x1, y1, x2, y2 = self.innerFrame()
ya = self.tr.Y(0.0)-1; yb = self.tr.Y(SIMPLEX_H)+1
self.gfx.applyPen(self.backgroundPen, Gfx.MASK_FILL)
self.gfx.fillRect(x1, y1, x2-x1+1, ya-y1+1)
self.gfx.fillRect(x1, yb, x2-x1+1, y2-yb+1)
def _clearTitle(self):
"""Clears the title."""
if self.title != "":
self.gfx.applyPen(self.backgroundPen, Gfx.MASK_FILL)
x1, y1, x2, y2 = self.topFrame()
self.gfx.fillRect(x1, y1, x2-x1+1, y2-y1+1)
def _clearSimplex(self):
"""Clears the simplex."""
x1 = self.tr.X(0.0); y1 = self.tr.Y(0.0)
x2 = self.tr.X(1.0); y2 = self.tr.Y(SIMPLEX_H)
self.gfx.applyPen(self.backgroundPen, Gfx.MASK_FILL)
self.gfx.fillRect(x1, y1, x2-x1+1, y2-y1+1)
def _clear(self):
"""Clears everything (simplex, title and labels)."""
self.gfx.applyPen(self.backgroundPen, Gfx.MASK_FILL)
self.gfx.fillRect(self.sx1, self.sy1,
self.sx2-self.sx1+1, self.sy2-self.sy1+1)
def _drawLabels(self):
"""Draws the labes at the three corners of the simplex."""
self.gfx.applyPen(self.labelPen, Gfx.MASK_FONT)
w,h = self.gfx.getTextSize(self.p1)
x,y = self.tr.X(0.0)-w/2, self.tr.Y(0.0)-h*12/10
self.gfx.writeStr(x, y, self.p1)
w,h = self.gfx.getTextSize(self.p2)
x,y = self.tr.X(1.0)-w/2, self.tr.Y(0.0)-h*12/10
self.gfx.writeStr(x, y, self.p2)
w,h = self.gfx.getTextSize(self.p3)
x,y = self.tr.X(0.5)-w/2, self.tr.Y(SIMPLEX_H)+h/2
self.gfx.writeStr(x, y, self.p3)
def _drawTitle(self):
"""Draws the title."""
if self.title != "":
sx1, sy1, sx2, sy2 = self.topFrame()
self.gfx.applyPen(self.titlePen, Gfx.MASK_FONT)
w, h = self.gfx.getTextSize(self.title)
x = sx1 + (sx2-sx1+1-w) / 2; y = sy1 + (sy2-sy1+1-h) / 2
self.gfx.writeStr(x, y, self.title)
def _drawEmptySimplex(self):
"""Draws the empty simplex."""
self.gfx.applyPen(self.simplexPen, Gfx.MASK_LINE)
self.gfx.drawPoly(self.tr.transform([
self.transform((1.0, 0.0, 0.0)),
self.transform((0.0, 1.0, 0.0)),
self.transform((0.0, 0.0, 1.0)),
self.transform((1.0, 0.0, 0.0))]))
def clear(self):
"""Clears the whole canvas and redraws titel, labels and an
empty simplex."""
self._clear()
self._drawLabels()
self._drawTitle()
self._drawEmptySimplex()
def clearSimplex(self):
"""Clears only the simplx diagram without affecting the
title or the labels."""
self._clearSimplex()
self._drawEmptySimplex()
def setStyle(self, styleFlags=None, titlePen=None, labelPen=None,
simplexPen=None, backgroundPen=None):
"""Sets the drawing style of the simplex diagram"""
redraw = False
if styleFlags != None and self.styleFlags != styleFlags:
self.styleFlags = styleFlags
redraw = True
if labelPen != None: self.labelPen = labelPen
if simplexPen != None: self.simplexPen = simplexPen
if backgroundPen != None: self.backgroundPen = backgroundPen
if titlePen != None:
self.titlePen = titlePen
top = self._calcTitleSize(self.gfx, self.title, titlePen)
if top != self.top:
self.adjustFrame(top, 0, 0, 0)
redraw = True
if redraw: self.redraw()
def setTitle(self, title):
"""Sets the player title"""
self.title = title
top = self._calcTitleSize(self.gfx, self.title, self.titlePen)
if top != self.top:
self.adjustFrame(top, 0, 0, 0)
self.clear()
else:
self._clearTitle()
self._drawTitle()
def setLabels(self, p1, p2, p3):
"""Sets the player names"""
self.p1, self.p2, self.p3 = p1, p2, p3
self._clearLabels(); self._drawLabels()
def resizedGfx(self):
"""Takes notice of a resized graphics context.
Note: The triangle diagram will be cleared after resizing!
"""
Graph.HardFramedScreen.resizedGfx(self)
self.clear()
def redraw(self):
"""Redraws the (empty) diagram."""
self.clear()
def transform(self, pr):
"""Population Ratio (3-tuple) -> (x,y)"""
return (pr[1]+pr[2]*0.5, SIMPLEX_H*pr[2])
def inverse(self, xy):
"""Virtual Coordinates (x,y) -> Population ratio (3-tuple)"""
p3 = xy[1] / SIMPLEX_H
p2 = xy[0] - p3*0.5
        p1 = 1.0 - p2 - p3
return (p1, p2, p3)
def peek(self, x, y):
"""Screen coordinates -> Population ratio (3-tuple)
In case (x,y) is not a point within the triangle diagram,
        the zero population (0.0, 0.0, 0.0) is returned.
"""
vx, vy = self.tr.invX(x), self.tr.invY(y)
pr = self.inverse((vx,vy))
if abs(pr[0]+pr[1]+pr[2]-1.0) < ERROR_TOLERANCE and \
pr[0] > 0.0 and pr[1] > 0.0 and pr[2] > 0.0:
return pr
else: return (0.0, 0.0, 0.0)
#
# following: the drawing operations
#
def applyPen(self, pen):
"""Sets the pen for the following drawing operations."""
self.gfx.applyPen(pen, Gfx.MASK_LINE)
def setColor(self, rgbTuple):
"""Sets the color fo the following drawing operations.
A call to apply pen will override this setting."""
self.gfx.setColor(rgbTuple)
def plot(self, pr):
"""Draws a point at position pr (population ration as 3-tuple).
"""
x, y = self.transform(pr)
self.gfx.drawPoint(self.tr.X(x), self.tr.Y(y))
def line(self, pr1, pr2):
"""Draws a line from pr1 to pr2."""
x1, y1 = self.transform(pr1)
x2, y2 = self.transform(pr2)
self.gfx.drawLine(self.tr.X(x1), self.tr.Y(y1),
self.tr.X(x2), self.tr.Y(y2))
def turtleDraw(self, pr1, pr2, length, autoScale=True, arrow=True):
"""Draws a line from pr1 in the direction of pr2."""
if autoScale:
sf = math.sqrt((pr2[0]-pr1[0])**2 + (pr2[1]-pr1[1])**2 + \
(pr2[2]-pr1[2])**2)
length = (length*sf*0.05)**0.45
x1, y1 = self.transform(pr1)
x2, y2 = self.transform(pr2)
dx = x2-x1; dy = y2-y1
l = math.sqrt(dx*dx + dy*dy)
        if l > 0.000000000001:  # l != 0, allowing for precision errors
x2 = x1+dx*length/l
y2 = y1+dy*length/l
else: x2, y2 = x1, y1
if arrow:
drawArrow(self.gfx, self.tr.X(x1), self.tr.Y(y1),
self.tr.X(x2), self.tr.Y(y2))
else:
self.gfx.drawLine(self.tr.X(x1), self.tr.Y(y1),
self.tr.X(x2), self.tr.Y(y2))
def spot(self, pr, size):
"""Draws a spot (rectangle) of size 'size' around pr."""
x, y = self.transform(pr)
s = size*1.2 # since gfx-area ranges from -0.1 to 1.1
x1 = x - s / 2.0; y1 = y - (s*SIMPLEX_H)/2.0
w1 = x + s/2.0; w2 = x - s/2.0
h1 = y + (s*SIMPLEX_H)/2.0; h2 = y - (s*SIMPLEX_H)/2.0
self.gfx.fillRect(self.tr.X(x1)+1, self.tr.Y(y1),
self.tr.X(w1) - self.tr.X(w2),
self.tr.Y(h1) - self.tr.Y(h2))
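# Illustrative sketch (hypothetical helper, not part of the original API):
# the barycentric mapping used by Plotter.transform() and Plotter.inverse(),
# reproduced here without any Gfx context. A population ratio (p1, p2, p3)
# with p1 + p2 + p3 == 1.0 is mapped onto a point inside an equilateral
# triangle of width 1.0 and height SIMPLEX_H, and can be recovered exactly.
def _example_barycentric_roundtrip(pr=(1.0/3, 1.0/3, 1.0/3)):
    x, y = pr[1] + pr[2]*0.5, SIMPLEX_H*pr[2]   # same formula as transform()
    p3 = y / SIMPLEX_H                          # same formulas as inverse()
    p2 = x - p3*0.5
    p1 = 1.0 - p2 - p3
    assert abs(p1 - pr[0]) < 1e-9
    assert abs(p2 - pr[1]) < 1e-9
    assert abs(p3 - pr[2]) < 1e-9
    return (p1, p2, p3)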
class PermutedPlotter(Plotter):
"""Plotter for simple triangle diagrams.
In contrast to class Plotter, the class PermutedPlotter
    allows the corners of the diagram to be permuted in an arbitrary way
before plotting.
"""
def __init__(self, gfx, title="Simplex Diagram",
p1="P1", p2="P2", p3="P3", styleFlags = 0,
titlePen = Gfx.BLACK_PEN, labelPen = Gfx.BLACK_PEN,
simplexPen=Gfx.BLACK_PEN, backgroundPen=Gfx.WHITE_PEN,
section=Graph.REGION_FULLSCREEN, permutation=(0,1,2)):
Plotter.__init__(self, gfx, title, p1, p2, p3, styleFlags,
titlePen, labelPen, simplexPen, backgroundPen,
section)
self.pm = (0,1,2); self.inv = (0,1,2)
self.map = self.zero_permutation
self.setPermutation(permutation)
    def setPermutation(self, permutation):
        """Use 'permutation' for the following drawing commands."""
        if sorted(permutation) != [0, 1, 2]:
            raise ValueError("%s is not a valid permutation!" \
                             % str(permutation))
        self.pm = tuple(permutation)
        inv = [0, 0, 0]
        for i in range(3):
            inv[self.pm[i]] = i
        self.inv = tuple(inv)
        if self.pm == (0, 1, 2):
            self.map = self.zero_permutation
        else: self.map = self.permutate
def permutate(self, pr):
"""-> permutation of point pr"""
return (pr[self.pm[0]], pr[self.pm[1]], pr[self.pm[2]])
def zero_permutation(self, pr):
"""-> pr"""
return pr
def transform(self, pr):
return Plotter.transform(self, self.map(pr))
def inverse(self, xy):
pr = Plotter.inverse(self, xy)
return (pr[self.inv[0]], pr[self.inv[1]], pr[self.inv[2]])
########################################################################
#
# misc. functions
#
########################################################################
RASTER_PureStrategies = [(1.0,0.0,0.0),(0.0,1.0,0.0),(0.0,0.0,1.0)]
RASTER_StrategyPairs = [(0.5,0.5,0.0),(0.5,0.0,0.5),(0.0,0.5,0.5)]
RASTER_Center = [(1/3.0, 1/3.0, 1/3.0)]
RASTER_WeightedPairs = [(0.5*5/6, 0.5*5/6, 1.0/6),
(0.5*5/6, 1.0/6, 0.5*5/6),
(1.0/6, 0.5*5/6, 0.5*5/6)]
RASTER_1 = RASTER_PureStrategies + RASTER_StrategyPairs + \
RASTER_Center + RASTER_WeightedPairs
def GenRaster(density):
"""density (= points in one row) -> point list."""
assert density >= 2, "density is %i, but must be >= 2" % density
pl = []
for y in range(density):
n = density-y
p3 = float(y) / (density-1)
f = 1.0 - p3
for x in range(n):
if n > 1:
p1 = float(x) / (n-1) * f
p2 = float(n-1-x) / (n-1) * f
else: p1 = 0.0; p2 = 0.0
pl.append((p1, p2, p3))
return pl
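# Illustrative example (values follow from the definition above): for the
# smallest allowed density the raster consists only of the three corners of
# the simplex, and in general GenRaster(d) yields d*(d+1)/2 sample points.
#   >>> GenRaster(2)
#   [(0.0, 1.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]
#   >>> len(GenRaster(30)) == 30 * 31 // 2
#   True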
def RandomGrid(N):
"""Generate a random raster with N points."""
pl = []
while N > 0:
a = random.random()
b = 1.0 - random.random()
if b < a: c = a; a = b; b = c
pl.append((a, b-a, 1.0-b))
N -= 1
return pl
RASTER_DEFAULT = GenRaster(30)
RASTER_RANDOM = RandomGrid(500)
##~
##~ def DarkenColor(color, strength=0.1):
##~ """Darken Color: rgbTuple -> rgbTuple"""
##~ return tuple([x-x*strength for x in color])
##~
##~ def InvertedDarkenColor(color, strength=0.1):
##~ """Brighten Color: rgbTuple -> rgbTuple"""
##~ st = strength**0.2
##~ return tuple([x-x*(1.0-st) for x in color])
##~
##~ def KeepColor(color, strength=0.0):
##~     """Return the same color: rgbTuple -> rgbTuple"""
##~ return color
def scaleColor(ca, cb, strength):
"Interpolates between the colors ca and cb using atan(strength)"
factor = math.atan(strength) / (math.pi/2)
r = ca[0] + (cb[0]-ca[0]) * factor
g = ca[1] + (cb[1]-ca[1]) * factor
b = ca[2] + (cb[2]-ca[2]) * factor
return (r,g,b)
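# Illustrative example: scaleColor() returns 'ca' for strength 0.0, the
# midpoint for strength 1.0 (atan(1)/(pi/2) == 0.5), and approaches 'cb'
# for very large strengths without ever quite reaching it.
#   >>> scaleColor((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), 0.0)
#   (0.0, 0.0, 0.0)
#   >>> scaleColor((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), 1.0)
#   (0.5, 0.5, 0.5)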
########################################################################
#
# class NullVisualizer
#
########################################################################
class NullVisualizer(object):
"""This is a dummy visualizer. It has a similar interface like
the other visualizer classes, but it does not draw anything except
the empty simplex.
"""
def __init__(self, plotter, function=None, raster=None,
baseColor=None, endColor=None, colorFunc=None):
self.plotter = plotter
def setFunction(self, function):
pass
def setRaster(self, raster):
pass
def setDensity(self, density):
pass
def changeColors(self, baseColor=None, endColor=None,
colorFunc=None):
pass
def show(self):
pass
def resizedGfx(self):
self.plotter.resizedGfx()
def redraw(self):
self.plotter.clear()
########################################################################
#
# class VectorField
#
########################################################################
class VectorField(NullVisualizer):
"""Draws a vectorfield based on an arbitrary set of sample points.
"""
def __init__(self, plotter, function, raster=RASTER_DEFAULT,
baseColor=(0.0, 0.0, 0.0), endColor=(1.0, 0.7, 0.3),
colorFunc=scaleColor, scaleArrows = False):
"""Init with a triangle plotter, a list of points each of which
represents a certain population distribution among the three
players and a population dynamical function.
"""
self.plotter = plotter
self.function = function
self.points = raster
self.arrowLen = self._arrowLength()
self.transformed = [self.function(p) for p in self.points]
self.color = baseColor
self.colorB = endColor
self.colorFunc = colorFunc
self.scale = scaleArrows
self.attributesChangedFlag = False
def _arrowLength(self):
"""-> length of arrows, depending on the density of the raster."""
return 1.0 / (2.0*(math.sqrt(len(self.points)+0.25)-0.5))
def setFunction(self, function):
"""Changes the function to display."""
self.function = function
self.attributesChangedFlag = True
def setRaster(self, raster):
"""Changes the sample points raster."""
self.points = raster
self.arrowLen = self._arrowLength()
self.attributesChangedFlag = True
def setDensity(self, density):
"""Sets the granularity of the diagram."""
self.setRaster(GenRaster(density))
self.attributesChangedFlag = True
def changeColors(self, baseColor=(0.0, 0.0, 0.0),
endColor=(1.0, 0.7, 0.3), colorFunc=scaleColor):
"""Change the colors of the diagram."""
self.color = baseColor
self.colorB = endColor
self.colorFunc = colorFunc
self.attributesChangedFlag = True
def _plot(self, pl):
"""Draw a list of points onto the simplex diagram."""
self.plotter.setColor(self.color)
for pr in pl:
self.plotter.plot(pr)
def show(self):
"""Draw VectorField."""
def distance(a,b):
"""Difference between two population ratios"""
return math.sqrt(((b[0]-a[0])**2 + (b[1]-a[1])**2 + \
(b[2]-a[2])**2) / 3.0)
if self.attributesChangedFlag:
self.plotter.clearSimplex()
self.transformed = [self.function(p) for p in self.points]
self.attributesChangedFlag = False
for a,b in zip(self.points, self.transformed):
d = distance(a, b)
c = self.colorFunc(self.color, self.colorB, d)
self.plotter.setColor(c)
self.plotter.turtleDraw(a, b, self.arrowLen, self.scale)
def resizedGfx(self):
"""Takes notice of a resized graphics context and redraws the
diagram."""
self.plotter.resizedGfx()
self.attributesChangedFlag = False
self.show()
def redraw(self):
"""Redraws the diagram"""
self.plotter.clear()
self.attributesChangedFlag = False
self.show()
########################################################################
#
# class Trajectory
#
########################################################################
class TrajectoryDiagram(VectorField):
"""Draw the trajectories of an arbitrary set of starting points
into a triangle diagram.
"""
def __init__(self, plotter, function, raster=RASTER_DEFAULT,
baseColor=(1.0, 0.6, 0.6), endColor=(0.0, 0.0, 0.0),
colorFunc=scaleColor, redrawable=True):
VectorField.__init__(self, plotter, function, raster,
baseColor, endColor, colorFunc)
self.shade = 0.0
self.redrawable = redrawable
self.originalRaster = self.points
self.history = [self.points]
self.MAGIC_SHOW = 8
def setRedrawable(self, redrawable):
"""Turns the auto redraw property on or off."""
self.redrawable = redrawable
def setFunction(self, function):
self.points = self.originalRaster
self.history = [self.points]
VectorField.setFunction(self, function)
def setRaster(self, raster):
self.points = raster
self.originalRaster = self.points
self.history = [self.points]
self.attributesChangedFlag = True
def _draw(self, pl, npl):
"""Connect old points to new (transformed) points."""
c = self.colorFunc(self.color, self.colorB, self.shade)
self.plotter.setColor(c)
for i in range(len(pl)):
self.plotter.line(pl[i], npl[i])
self.shade += 0.15
def step(self, n=1):
"""Calulate and draw the next n steps."""
if self.attributesChangedFlag:
self.attributesChangedFlag = False
self.plotter.clearSimplex()
self.shade = 0.0
while n > 0:
newPoints = [self.function(p) for p in self.points]
self._draw(self.points, newPoints)
self.points = newPoints
if self.redrawable: self.history.append(self.points)
n -= 1
def show(self):
"""Shows the trajectories."""
self.shade = 0.0
if len(self.history) > 1:
self.plotter.clearSimplex()
self.attributesChangedFlag = False
for i in range(1, len(self.history)):
self._draw(self.history[i-1], self.history[i])
else: self.step(self.MAGIC_SHOW)
########################################################################
#
# class Patches
#
########################################################################
def VerifyPointList(pl, tolerance):
"""For every point check the condition abs((p1+p2+p3)-1)<=tolerance
"""
for pr in pl:
assert abs(pr[0]+pr[1]+pr[2]-1.0) <= tolerance, \
"Point (%f, %f, %f) is not a valid " % pr + \
"population ratio!"
class PatchDiagram(object):
"""A kind of simplex diagram that allows for easy visualization
of attraction areas and points of attraction."""
def __init__(self, plotter, function, density = 51,
color1 = (1.0, 0.0, 0.0), color2 = (0.0, 1.0, 0.0),
color3 = (0.0, 0.0, 1.0), check = ERROR_CHECKING):
"""Init Diagram with a triangle plotter, the population
dynamical function, the density (e.g. resolution) and the
colors for every corner of the triangle.
"""
self.plotter = plotter
self.checkingFlag = check
self.offsets = []
self.colorTable = {}
self.c1 = color1
self.c2 = color2
self.c3 = color3
self.function = function
self.density = 2
self.MAGIC_SHOW = 30
self.setDensity(density)
def _assignColor(self, pr):
"""Return the color assoziated with the point pr."""
r = self.c1[0]*pr[0] + self.c2[0]*pr[1] + self.c3[0]*pr[2]
g = self.c1[1]*pr[0] + self.c2[1]*pr[1] + self.c3[1]*pr[2]
b = self.c1[2]*pr[0] + self.c2[2]*pr[1] + self.c3[2]*pr[2]
return (r, g, b)
def _genColorTable(self):
colorTable = {}
for p in self.points:
colorTable[p] = self._assignColor(p)
return colorTable
def _checkColorTable(self):
if self.colorTable == {}:
self.colorTable = self._genColorTable()
def setFunction(self, function):
self.function = function
self.colorTable = {}
def setDensity(self, density):
"""Sets the granularity of the patches diagram."""
assert density >= 2, "density is %i, but must be >= 2"%density
self.density = density
self.spotWidth = ((self.density+1)/float(self.density)) / \
float(self.density)
self.spotWidth *= 1.05 # rounding error correction
self.points = GenRaster(self.density)
self.offsets = [] # create fast lookup table
y = self.density; offset = 0
while y > 0:
self.offsets.append(offset)
offset += y
y -= 1
self.colorTable = {}
def setEdgeColors(self, color1, color2, color3):
"""Sets the edge colors."""
self.c1 = color1
self.c2 = color2
self.c3 = color3
self.colorTable = {}
def _draw(self):
"""Draws the Patches Diagram."""
if self.colorTable != {}: CT = self.colorTable
else:
CT = self._genColorTable()
self.plotter.clear()
for p in self.points:
self.plotter.setColor(CT[p])
self.plotter.spot(p, self.spotWidth)
def _getNearest(self, pr):
"""Return the nearest point to p on the grid.
"""
def dist(a, b):
return (a[0]-b[0])**2+(a[1]-b[1])**2+(a[2]-b[2])**2
if self.checkingFlag: VerifyPointList([pr], 0.01/self.density)
ec = 0.1 / self.density
y = int((self.density-1) * pr[2] + 0.5)
if pr[2] < 1.0:
x = int((self.density-y-1) * pr[0] / (1.0-pr[2]) + ec)
else: x = 0
try:
p1 = self.points[self.offsets[y]+x]
p2 = self.points[self.offsets[y]+x+1]
p3 = self.points[self.offsets[y+1]+x]
except IndexError:
p2 = p1; p3 = p1
d1 = dist(pr, p1); d2 = dist(pr, p2); d3 = dist(pr, p3)
if d1 <= d2 and d1 <= d3: return p1
elif d2 <= d3: return p2
else: return p3
def step(self, n=1):
"""Caluculate the next n steps and update diagram."""
self._checkColorTable()
while n > 0:
newCT = {}
for p in self.points:
q = self._getNearest(self.function(p))
if self.checkingFlag:
VerifyPointList([q], ERROR_TOLERANCE)
newCT[p] = self.colorTable[q]
self.colorTable = newCT
n -= 1
self._draw()
def show(self):
"""Show the patched diagram either in its current state or after
10 steps."""
if self.colorTable != {}: self._draw()
else: self.step(self.MAGIC_SHOW)
def showFixedPoints(self, color):
"""Search for possible fixed points. Only useful after
calling method 'step'.
"""
self.plotter.setColor(color)
for p in self.points:
if self.colorTable[p] == self._assignColor(p):
self.plotter.spot(p, self.spotWidth)
def resizedGfx(self):
self.plotter.resizedGfx()
self._draw()
def redraw(self):
self.plotter.clear()
self._draw()
########################################################################
#
# Diagram
#
########################################################################
VECTORS, SCALED_VECTORS, TRAJECTORIES, PATCHES, NULL = \
[2**i for i in list(range(5))]
class Diagram(Plotter):
"""Universal class for visualizing the population dynamics of
a three-species population on a 2-dimensional simplex diagram.
Attributes (read only):
    title, p1, p2, p3 - strings: Strings for the title and the
three corners of the diagram.
styleFlags - integer, interpreted as a bitfield of flags:
The style or rather flavour of the simplex diagram.
        Presently five flavours are possible: VECTORS for drawing
        the diagram as a vector field with many little arrows;
        SCALED_VECTORS for a vector field with arrow lengths scaled
        by the local strength of the field; TRAJECTORIES for drawing
        pseudo trajectories; PATCHES
for drawing a patched diagram, where each point in the
diagram has a unique color in the beginning. From generation
to generation, however, colors are adjusted such that
every point ("patch") takes the color of the point it
has moved to. This exposes areas of attraction in the
diagram. NULL is a dummy that draws nothing!
visualizer - class: the visualizing class: VectorField,
Trajectory or Patches.
function - f(p)->p', where p and p' are 3 tuples of floats
that add up to 1.0: population dynamics function.
    raster - list of 3-tuples of floats each of which adds up to
        1.0: the point raster (sample points) of the simplex diagram.
    density - integer: the density (number of sample points in one
        row) of the point raster of the simplex diagram.
color1, color2, color3 - (r,g,b) tuples, where r,g and b are
floats in range of [0.0, 1.0]: For patch diagrams these
are the edge colors of the three edges of the diagram. For
trajectory diagrams color1 is the starting color and color2
is the color towards which later steps of the trajectory are
shaded. For vector fields the range between color1 and
color2 is used to indicate the strength of the vector field.
colorFunc - f(ca, cb, strength) -> c, where ca and cb are colors
and strength is a float from [0, infinity]: This function
produces a color shade from 'ca', 'cb' and 'strength',
usually somewhere on the line between 'ca' and 'cb'.
titlePen, labelPen, simplexPen, backgroundPen - Gfx.Pen: Pens
for the respective parts of the simplex diagram.
    section - 4-tuple of floats from the range [0.0, 1.0]: the
part of the screen to be used for the diagram.
"""
def __init__(self, gfx, function, title="Simplex Diagram",
p1="A", p2="B", p3="C", styleFlags = VECTORS,
raster = RASTER_DEFAULT, density = -1,
color1 = (0.,0.,1.), color2 = (0.,1.,0.),
color3 = (1.,0.,0.), colorFunc = scaleColor,
titlePen = Gfx.BLACK_PEN, labelPen = Gfx.BLACK_PEN,
simplexPen=Gfx.BLACK_PEN, backgroundPen=Gfx.WHITE_PEN,
section=Graph.REGION_FULLSCREEN):
Plotter.__init__(self, gfx, title, p1, p2, p3, styleFlags,
titlePen, labelPen, simplexPen, backgroundPen, section)
self.function = function
self.raster = raster
self.density = density
if self.density < 2: self._adjustDensity()
else: self.raster = GenRaster(self.density)
self.color1, self.color2, self.color3 = color1, color2, color3
self.colorFunc = colorFunc
self._selectVisualizer()
def _selectVisualizer(self):
if self.styleFlags & VECTORS or self.styleFlags & SCALED_VECTORS:
scale = self.styleFlags & SCALED_VECTORS
self.visualizer = VectorField(self, self.function,
self.raster, self.color1, self.color2, self.colorFunc, scale)
elif self.styleFlags & TRAJECTORIES:
self.visualizer = TrajectoryDiagram(self, self.function,
self.raster, self.color1, self.color2, self.colorFunc)
elif self.styleFlags & PATCHES:
self.visualizer = PatchDiagram(self, self.function,
self.density, self.color1, self.color2, self.color3)
elif self.styleFlags & NULL:
self.visualizer = NullVisualizer(self)
else: assert 0, "No flavor specified for simplex diagram!"
def _adjustDensity(self):
"""Sets the granularity of the patches diagram accroding to the
size (i.e. granularity) of the raster."""
l = len(self.raster)
assert l >= 3, "raster contains %i sample points, "%l+\
"but at least 3 sample points are requires!"
self.density = int(math.sqrt(2*l + 0.25) - 0.5)
def setStyle(self, styleFlags=None, titlePen=None, labelPen=None,
simplexPen=None, backgroundPen=None):
"""Sets the drawing style of the simplex diagram."""
if styleFlags != None and styleFlags != self.styleFlags:
self.styleFlags = styleFlags
self.clear()
self._selectVisualizer()
Plotter.setStyle(self, styleFlags, titlePen, labelPen,
simplexPen, backgroundPen)
def setFunction(self, function):
"Changes the population dynamical function."
self.function = function
self.visualizer.setFunction(function)
def setRaster(self, raster):
"Changes the raster of sample points (population distributions)."
self.raster = raster
self._adjustDensity()
if not PATCHES & self.styleFlags:
self.visualizer.setRaster(raster)
else: self.visualizer.setDensity(self.density)
def setDensity(self, density):
"""Generates a raster of uniformly distributed sample points
(population distributions) with the given density."""
self.density = density
self.raster = GenRaster(self.density)
if PATCHES & self.styleFlags:
self.visualizer.setDensity(self.density)
else: self.visualizer.setRaster(self.raster)
def changeColors(self, color1 = (0.,1.,0.), color2 = (1.,0.,0.),
color3 = (0.,0.,1.), colorFunc=scaleColor):
"""Changes the colors of diagram, including a color modifying
function. Note: The semantics of these paramters may differ
depending on the visualizer."""
self.color1, self.color2, self.color3 = color1, color2, color3
self.colorFunc = colorFunc
if PATCHES & self.styleFlags:
self.visualizer.setEdgeColors(color1, color2, color3)
else:
self.visualizer.changeColors(color1, color2, colorFunc)
def show(self, steps = -1):
"""Shows the diagram calculating 'steps' generations for
dyniamic diagrams (trajectories or patches)."""
if (VECTORS|SCALED_VECTORS|NULL) & self.styleFlags:
self.visualizer.show()
else:
if steps == -1: n = self.visualizer.MAGIC_SHOW
else: n = steps
self.visualizer.step(n)
def showFixedPoints(self, color):
"""Shows candidates for fixed points (only if style = PATCHES).
"""
assert PATCHES & self.styleFlags, "Can only show potential"+\
" fixed points when the simplex diagram style is PATCHES!"
self.visualizer.showFixedPoints(color)
def redraw(self):
"Draws the diagram."
Plotter.redraw(self)
self.visualizer.show()
def resizedGfx(self):
"""Takes notice of a resized graphics context and redraws the
diagram."""
Plotter.resizedGfx(self)
self.visualizer.show()
########################################################################
#
# Tests
#
########################################################################
if __name__ == "__main__":
import systemTest
systemTest.TestSimplex()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import utils as nova_utils
from nova.virt.libvirt import utils
lvm_opts = [
cfg.StrOpt('volume_clear',
default='zero',
help='Method used to wipe old volumes (valid options are: '
'none, zero, shred)'),
cfg.IntOpt('volume_clear_size',
default=0,
help='Size in MiB to wipe at start of old volumes. 0 => all'),
]
CONF = cfg.CONF
CONF.register_opts(lvm_opts, 'libvirt')
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
@nova_utils.expects_func_args('path')
def wrap_no_device_error(function):
"""Wraps a method to catch exceptions related to volume BDM not found.
This decorator wraps a method to catch ProcessExecutionError having to do
with a missing volume block device mapping. It translates the error to a
VolumeBDMPathNotFound exception.
"""
@functools.wraps(function)
def decorated_function(path):
try:
return function(path)
except processutils.ProcessExecutionError as e:
if 'No such device or address' in e.stderr:
raise exception.VolumeBDMPathNotFound(path=path)
else:
raise
return decorated_function
def create_volume(vg, lv, size, sparse=False):
"""Create LVM image.
Creates a LVM image with given size.
:param vg: existing volume group which should hold this image
:param lv: name for this image (logical volume)
:size: size of image in bytes
:sparse: create sparse logical volume
"""
vg_info = get_volume_group_info(vg)
free_space = vg_info['free']
def check_size(vg, lv, size):
if size > free_space:
raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
' Only %(free_space)db available,'
' but %(size)db required'
' by volume %(lv)s.') %
{'vg': vg,
'free_space': free_space,
'size': size,
'lv': lv})
if sparse:
preallocated_space = 64 * units.Mi
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warn(_LW('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)db,'
' but free space on volume group is'
' only %(free_space)db.'),
{'vg': vg,
'free_space': free_space,
'size': size,
'lv': lv})
cmd = ('lvcreate', '-L', '%db' % preallocated_space,
'--virtualsize', '%db' % size, '-n', lv, vg)
else:
check_size(vg, lv, size)
cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
utils.execute(*cmd, run_as_root=True, attempts=3)
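# Illustrative examples of the lvcreate commands composed above, with
# hypothetical names and a size given in bytes:
#   non-sparse: lvcreate -L <size>b -n <lv> <vg>
#   sparse:     lvcreate -L 67108864b --virtualsize <size>b -n <lv> <vg>
# where 67108864 bytes is the 64 MiB preallocated for a sparse volume.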
def get_volume_group_info(vg):
"""Return free/used/total space info for a volume group in bytes
:param vg: volume group name
:returns: A dict containing:
:total: How big the filesystem is (in bytes)
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
"""
out, err = utils.execute('vgs', '--noheadings', '--nosuffix',
'--separator', '|',
'--units', 'b', '-o', 'vg_size,vg_free', vg,
run_as_root=True)
info = out.split('|')
if len(info) != 2:
raise RuntimeError(_("vg %s must be LVM volume group") % vg)
return {'total': int(info[0]),
'free': int(info[1]),
'used': int(info[0]) - int(info[1])}
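# Illustrative example (hypothetical vgs output): with
#   out = '  10737418240|5368709120\n'
# the parsing above yields
#   {'total': 10737418240, 'free': 5368709120, 'used': 5368709120}
# i.e. a 10 GiB volume group with half of its space still free.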
def list_volumes(vg):
"""List logical volumes paths for given volume group.
:param vg: volume group name
:returns: Return a logical volume list for given volume group
: Data format example
: ['volume-aaa', 'volume-bbb', 'volume-ccc']
"""
out, err = utils.execute('lvs', '--noheadings', '-o', 'lv_name', vg,
run_as_root=True)
return [line.strip() for line in out.splitlines()]
def volume_info(path):
"""Get logical volume info.
:param path: logical volume path
:returns: Return a dict object including info of given logical volume
: Data format example
: {'#Seg': '1', 'Move': '', 'Log': '', 'Meta%': '', 'Min': '-1',
: ...
: 'Free': '9983', 'LV': 'volume-aaa', 'Host': 'xyz.com',
: 'Active': 'active', 'Path': '/dev/vg/volume-aaa', '#LV': '3',
: 'Maj': '-1', 'VSize': '50.00g', 'VFree': '39.00g', 'Pool': '',
: 'VG Tags': '', 'KMaj': '253', 'Convert': '', 'LProfile': '',
: '#Ext': '12799', 'Attr': '-wi-a-----', 'VG': 'vg',
: ...
: 'LSize': '1.00g', '#PV': '1', '#VMdaCps': 'unmanaged'}
"""
out, err = utils.execute('lvs', '-o', 'vg_all,lv_all',
'--separator', '|', path, run_as_root=True)
info = [line.split('|') for line in out.splitlines()]
if len(info) != 2:
raise RuntimeError(_("Path %s must be LVM logical volume") % path)
return dict(zip(*info))
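# Illustrative example (hypothetical, heavily abbreviated lvs output): the
# command prints a '|'-separated header row and a value row, which zip into
# the returned dict:
#   out = 'VG|VSize|LV|LSize\nvg|50.00g|volume-aaa|1.00g\n'
#   -> {'VG': 'vg', 'VSize': '50.00g', 'LV': 'volume-aaa', 'LSize': '1.00g'}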
@wrap_no_device_error
def get_volume_size(path):
"""Get logical volume size in bytes.
:param path: logical volume path
:raises: processutils.ProcessExecutionError if getting the volume size
fails in some unexpected way.
:raises: exception.VolumeBDMPathNotFound if the volume path does not exist.
"""
out, _err = utils.execute('blockdev', '--getsize64', path,
run_as_root=True)
return int(out)
def _zero_volume(path, volume_size):
"""Write zeros over the specified path
:param path: logical volume path
    :param volume_size: number of zero bytes to write
"""
bs = units.Mi
direct_flags = ('oflag=direct',)
sync_flags = ()
remaining_bytes = volume_size
# The loop efficiently writes zeros using dd,
# and caters for versions of dd that don't have
# the easier to use iflag=count_bytes option.
while remaining_bytes:
zero_blocks = remaining_bytes / bs
seek_blocks = (volume_size - remaining_bytes) / bs
zero_cmd = ('dd', 'bs=%s' % bs,
'if=/dev/zero', 'of=%s' % path,
'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
zero_cmd += direct_flags
zero_cmd += sync_flags
if zero_blocks:
utils.execute(*zero_cmd, run_as_root=True)
remaining_bytes %= bs
bs /= units.Ki # Limit to 3 iterations
# Use O_DIRECT with initial block size and fdatasync otherwise
direct_flags = ()
sync_flags = ('conv=fdatasync',)
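# Illustrative walk-through (hypothetical size): for a volume_size of
# 5 MiB + 3 KiB + 2 bytes the loop above issues at most three dd calls,
# shrinking the block size from MiB to KiB to single bytes:
#   1st pass: bs=1048576, count=5, seek=0,       oflag=direct
#   2nd pass: bs=1024,    count=3, seek=5120,    conv=fdatasync
#   3rd pass: bs=1,       count=2, seek=5245952, conv=fdatasync
# after which remaining_bytes %= 1 leaves 0 and the loop terminates.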
def clear_volume(path):
"""Obfuscate the logical volume.
:param path: logical volume path
"""
volume_clear = CONF.libvirt.volume_clear
if volume_clear not in ('none', 'shred', 'zero'):
LOG.error(_LE("ignoring unrecognized volume_clear='%s' value"),
volume_clear)
volume_clear = 'zero'
if volume_clear == 'none':
return
volume_clear_size = int(CONF.libvirt.volume_clear_size) * units.Mi
volume_size = get_volume_size(path)
if volume_clear_size != 0 and volume_clear_size < volume_size:
volume_size = volume_clear_size
if volume_clear == 'zero':
# NOTE(p-draigbrady): we could use shred to do the zeroing
# with -n0 -z, however only versions >= 8.22 perform as well as dd
_zero_volume(path, volume_size)
elif volume_clear == 'shred':
utils.execute('shred', '-n3', '-s%d' % volume_size, path,
run_as_root=True)
else:
raise exception.Invalid(_("volume_clear='%s' is not handled")
% volume_clear)
def remove_volumes(paths):
"""Remove one or more logical volume."""
errors = []
for path in paths:
clear_volume(path)
lvremove = ('lvremove', '-f', path)
try:
utils.execute(*lvremove, attempts=3, run_as_root=True)
except processutils.ProcessExecutionError as exp:
errors.append(six.text_type(exp))
if errors:
raise exception.VolumesNotRemoved(reason=(', ').join(errors))
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import six
import struct
import time
from ryu import cfg
from ryu.topology import event
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.exception import RyuException
from ryu.lib import addrconv, hub
from ryu.lib.mac import DONTCARE_STR
from ryu.lib.dpid import dpid_to_str, str_to_dpid
from ryu.lib.port_no import port_no_to_str
from ryu.lib.packet import packet, ethernet
from ryu.lib.packet import lldp, ether_types
from ryu.ofproto.ether import ETH_TYPE_LLDP
from ryu.ofproto.ether import ETH_TYPE_CFM
from ryu.ofproto import nx_match
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_4
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.BoolOpt('observe-links', default=False,
help='observe link discovery events.'),
cfg.BoolOpt('install-lldp-flow', default=True,
help='link discovery: explicitly install flow entry '
'to send lldp packet to controller'),
cfg.BoolOpt('explicit-drop', default=True,
help='link discovery: explicitly drop lldp packet in')
])
class Port(object):
# This is data class passed by EventPortXXX
def __init__(self, dpid, ofproto, ofpport):
super(Port, self).__init__()
self.dpid = dpid
self._ofproto = ofproto
self._config = ofpport.config
self._state = ofpport.state
self.port_no = ofpport.port_no
self.hw_addr = ofpport.hw_addr
self.name = ofpport.name
def is_reserved(self):
return self.port_no > self._ofproto.OFPP_MAX
def is_down(self):
return (self._state & self._ofproto.OFPPS_LINK_DOWN) > 0 \
or (self._config & self._ofproto.OFPPC_PORT_DOWN) > 0
def is_live(self):
# NOTE: OF1.2 has OFPPS_LIVE state
# return (self._state & self._ofproto.OFPPS_LIVE) > 0
return not self.is_down()
def to_dict(self):
return {'dpid': dpid_to_str(self.dpid),
'port_no': port_no_to_str(self.port_no),
'hw_addr': self.hw_addr,
'name': self.name.decode('utf-8')}
# for Switch.del_port()
def __eq__(self, other):
return self.dpid == other.dpid and self.port_no == other.port_no
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.dpid, self.port_no))
def __str__(self):
LIVE_MSG = {False: 'DOWN', True: 'LIVE'}
return 'Port<dpid=%s, port_no=%s, %s>' % \
(self.dpid, self.port_no, LIVE_MSG[self.is_live()])
class Switch(object):
# This is data class passed by EventSwitchXXX
def __init__(self, dp):
super(Switch, self).__init__()
self.dp = dp
self.ports = []
def add_port(self, ofpport):
port = Port(self.dp.id, self.dp.ofproto, ofpport)
if not port.is_reserved():
self.ports.append(port)
def del_port(self, ofpport):
        self.ports.remove(Port(self.dp.id, self.dp.ofproto, ofpport))
def to_dict(self):
d = {'dpid': dpid_to_str(self.dp.id),
'ports': [port.to_dict() for port in self.ports]}
return d
def __str__(self):
msg = 'Switch<dpid=%s, ' % self.dp.id
for port in self.ports:
msg += str(port) + ' '
msg += '>'
return msg
class Link(object):
# This is data class passed by EventLinkXXX
def __init__(self, src, dst):
super(Link, self).__init__()
self.src = src
self.dst = dst
def to_dict(self):
d = {'src': self.src.to_dict(),
'dst': self.dst.to_dict()}
return d
# this type is used for key value of LinkState
def __eq__(self, other):
return self.src == other.src and self.dst == other.dst
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.src, self.dst))
def __str__(self):
return 'Link: %s to %s' % (self.src, self.dst)
class Host(object):
# This is data class passed by EventHostXXX
def __init__(self, mac, port):
super(Host, self).__init__()
self.port = port
self.mac = mac
self.ipv4 = []
self.ipv6 = []
def to_dict(self):
d = {'mac': self.mac,
'ipv4': self.ipv4,
'ipv6': self.ipv6,
'port': self.port.to_dict()}
return d
def __eq__(self, host):
return self.mac == host.mac and self.port == host.port
def __str__(self):
msg = 'Host<mac=%s, port=%s,' % (self.mac, str(self.port))
msg += ','.join(self.ipv4)
msg += ','.join(self.ipv6)
msg += '>'
return msg
class HostState(dict):
# mac address -> Host class
def __init__(self):
super(HostState, self).__init__()
def add(self, host):
mac = host.mac
self.setdefault(mac, host)
def update_ip(self, host, ip_v4=None, ip_v6=None):
mac = host.mac
host = None
if mac in self:
host = self[mac]
if not host:
return
if ip_v4 is not None:
if ip_v4 in host.ipv4:
host.ipv4.remove(ip_v4)
host.ipv4.append(ip_v4)
if ip_v6 is not None:
if ip_v6 in host.ipv6:
host.ipv6.remove(ip_v6)
host.ipv6.append(ip_v6)
def get_by_dpid(self, dpid):
result = []
for mac in self:
host = self[mac]
if host.port.dpid == dpid:
result.append(host)
return result
class PortState(dict):
# dict: int port_no -> OFPPort port
# OFPPort is defined in ryu.ofproto.ofproto_v1_X_parser
def __init__(self):
super(PortState, self).__init__()
def add(self, port_no, port):
self[port_no] = port
def remove(self, port_no):
del self[port_no]
def modify(self, port_no, port):
self[port_no] = port
class PortData(object):
def __init__(self, is_down, lldp_data):
super(PortData, self).__init__()
self.is_down = is_down
self.lldp_data = lldp_data
self.timestamp = None
self.sent = 0
def lldp_sent(self):
self.timestamp = time.time()
self.sent += 1
def lldp_received(self):
self.sent = 0
def lldp_dropped(self):
return self.sent
def clear_timestamp(self):
self.timestamp = None
def set_down(self, is_down):
self.is_down = is_down
def __str__(self):
return 'PortData<live=%s, timestamp=%s, sent=%d>' \
% (not self.is_down, self.timestamp, self.sent)
class PortDataState(dict):
# dict: Port class -> PortData class
    # slimmed-down version of OrderedDict, as Python 2.6 doesn't provide one.
_PREV = 0
_NEXT = 1
_KEY = 2
def __init__(self):
super(PortDataState, self).__init__()
self._root = root = [] # sentinel node
root[:] = [root, root, None] # [_PREV, _NEXT, _KEY] doubly linked list
self._map = {}
def _remove_key(self, key):
link_prev, link_next, key = self._map.pop(key)
link_prev[self._NEXT] = link_next
link_next[self._PREV] = link_prev
def _append_key(self, key):
root = self._root
last = root[self._PREV]
last[self._NEXT] = root[self._PREV] = self._map[key] = [last, root,
key]
def _prepend_key(self, key):
root = self._root
first = root[self._NEXT]
first[self._PREV] = root[self._NEXT] = self._map[key] = [root, first,
key]
def _move_last_key(self, key):
self._remove_key(key)
self._append_key(key)
def _move_front_key(self, key):
self._remove_key(key)
self._prepend_key(key)
def add_port(self, port, lldp_data):
if port not in self:
self._prepend_key(port)
self[port] = PortData(port.is_down(), lldp_data)
else:
self[port].is_down = port.is_down()
def lldp_sent(self, port):
port_data = self[port]
port_data.lldp_sent()
self._move_last_key(port)
return port_data
def lldp_received(self, port):
self[port].lldp_received()
def move_front(self, port):
port_data = self.get(port, None)
if port_data is not None:
port_data.clear_timestamp()
self._move_front_key(port)
def set_down(self, port):
is_down = port.is_down()
port_data = self[port]
port_data.set_down(is_down)
port_data.clear_timestamp()
if not is_down:
self._move_front_key(port)
return is_down
def get_port(self, port):
return self[port]
def del_port(self, port):
del self[port]
self._remove_key(port)
def __iter__(self):
root = self._root
curr = root[self._NEXT]
while curr is not root:
yield curr[self._KEY]
curr = curr[self._NEXT]
def clear(self):
for node in self._map.values():
del node[:]
root = self._root
root[:] = [root, root, None]
self._map.clear()
dict.clear(self)
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) pairs in od'
for k in self:
yield (k, self[k])
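# Illustrative sketch (hypothetical helper, not part of the original module):
# PortDataState behaves like an ordered dict, so the ports that have waited
# longest for an LLDP transmission are visited first when iterating.
# add_port() prepends a new port, lldp_sent() moves a port to the back and
# move_front() pulls it to the front again.
def _example_port_ordering():
    class FakePort(object):          # minimal stand-in for a Port instance
        def is_down(self):
            return False
    p1, p2 = FakePort(), FakePort()
    pds = PortDataState()
    pds.add_port(p1, b'lldp-1')      # iteration order: [p1]
    pds.add_port(p2, b'lldp-2')      # iteration order: [p2, p1]
    pds.lldp_sent(p2)                # p2 was just probed -> [p1, p2]
    pds.move_front(p2)               # probe p2 again soon -> [p2, p1]
    return list(pds)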
class LinkState(dict):
# dict: Link class -> timestamp
def __init__(self):
super(LinkState, self).__init__()
self._map = {}
def get_peer(self, src):
return self._map.get(src, None)
def update_link(self, src, dst):
link = Link(src, dst)
self[link] = time.time()
self._map[src] = dst
        # return whether the reverse link is also up
rev_link = Link(dst, src)
return rev_link in self
def link_down(self, link):
del self[link]
del self._map[link.src]
def rev_link_set_timestamp(self, rev_link, timestamp):
        # rev_link may or may not be in LinkState
if rev_link in self:
self[rev_link] = timestamp
def port_deleted(self, src):
dst = self.get_peer(src)
if dst is None:
raise KeyError()
link = Link(src, dst)
rev_link = Link(dst, src)
del self[link]
del self._map[src]
# reverse link might not exist
self.pop(rev_link, None)
rev_link_dst = self._map.pop(dst, None)
return dst, rev_link_dst
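# Illustrative example (port_a and port_b standing for hypothetical Port
# instances): update_link() records one direction of a link and reports
# whether the reverse direction has already been observed, which the LLDP
# packet-in handler below uses to decide if the peer port should be probed
# again soon.
#   ls = LinkState()
#   ls.update_link(port_a, port_b)   # -> False, b -> a not seen yet
#   ls.update_link(port_b, port_a)   # -> True, both directions are now known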
class LLDPPacket(object):
    # make an LLDP packet for link discovery.
CHASSIS_ID_PREFIX = 'dpid:'
CHASSIS_ID_PREFIX_LEN = len(CHASSIS_ID_PREFIX)
CHASSIS_ID_FMT = CHASSIS_ID_PREFIX + '%s'
PORT_ID_STR = '!I' # uint32_t
PORT_ID_SIZE = 4
class LLDPUnknownFormat(RyuException):
message = '%(msg)s'
@staticmethod
def lldp_packet(dpid, port_no, dl_addr, ttl):
pkt = packet.Packet()
dst = lldp.LLDP_MAC_NEAREST_BRIDGE
src = dl_addr
ethertype = ETH_TYPE_LLDP
eth_pkt = ethernet.ethernet(dst, src, ethertype)
pkt.add_protocol(eth_pkt)
tlv_chassis_id = lldp.ChassisID(
subtype=lldp.ChassisID.SUB_LOCALLY_ASSIGNED,
chassis_id=(LLDPPacket.CHASSIS_ID_FMT %
dpid_to_str(dpid)).encode('ascii'))
tlv_port_id = lldp.PortID(subtype=lldp.PortID.SUB_PORT_COMPONENT,
port_id=struct.pack(
LLDPPacket.PORT_ID_STR,
port_no))
tlv_ttl = lldp.TTL(ttl=ttl)
tlv_end = lldp.End()
tlvs = (tlv_chassis_id, tlv_port_id, tlv_ttl, tlv_end)
lldp_pkt = lldp.lldp(tlvs)
pkt.add_protocol(lldp_pkt)
pkt.serialize()
return pkt.data
@staticmethod
def lldp_parse(data):
pkt = packet.Packet(data)
i = iter(pkt)
eth_pkt = six.next(i)
assert type(eth_pkt) == ethernet.ethernet
lldp_pkt = six.next(i)
if type(lldp_pkt) != lldp.lldp:
raise LLDPPacket.LLDPUnknownFormat()
tlv_chassis_id = lldp_pkt.tlvs[0]
if tlv_chassis_id.subtype != lldp.ChassisID.SUB_LOCALLY_ASSIGNED:
raise LLDPPacket.LLDPUnknownFormat(
msg='unknown chassis id subtype %d' % tlv_chassis_id.subtype)
chassis_id = tlv_chassis_id.chassis_id.decode('utf-8')
if not chassis_id.startswith(LLDPPacket.CHASSIS_ID_PREFIX):
raise LLDPPacket.LLDPUnknownFormat(
msg='unknown chassis id format %s' % chassis_id)
src_dpid = str_to_dpid(chassis_id[LLDPPacket.CHASSIS_ID_PREFIX_LEN:])
tlv_port_id = lldp_pkt.tlvs[1]
if tlv_port_id.subtype != lldp.PortID.SUB_PORT_COMPONENT:
raise LLDPPacket.LLDPUnknownFormat(
msg='unknown port id subtype %d' % tlv_port_id.subtype)
port_id = tlv_port_id.port_id
if len(port_id) != LLDPPacket.PORT_ID_SIZE:
raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown port id %s' % port_id)
(src_port_no, ) = struct.unpack(LLDPPacket.PORT_ID_STR, port_id)
return src_dpid, src_port_no
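# Illustrative sketch (hypothetical helper and values, not part of the
# original module): a packet built by LLDPPacket.lldp_packet() for a given
# (dpid, port_no) pair parses back to the same pair, which is exactly how
# the packet-in handler below recovers the source of a discovered link.
def _example_lldp_roundtrip():
    data = LLDPPacket.lldp_packet(0x1, 7, DONTCARE_STR, 120)
    return LLDPPacket.lldp_parse(data)   # -> (1, 7)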
class Switches(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION, ofproto_v1_2.OFP_VERSION,
ofproto_v1_3.OFP_VERSION, ofproto_v1_4.OFP_VERSION]
_EVENTS = [event.EventSwitchEnter, event.EventSwitchLeave,
event.EventSwitchReconnected,
event.EventPortAdd, event.EventPortDelete,
event.EventPortModify,
event.EventLinkAdd, event.EventLinkDelete,
event.EventHostAdd]
DEFAULT_TTL = 120 # unused. ignored.
LLDP_PACKET_LEN = len(LLDPPacket.lldp_packet(0, 0, DONTCARE_STR, 0))
LLDP_SEND_GUARD = .05
LLDP_SEND_PERIOD_PER_PORT = .9
TIMEOUT_CHECK_PERIOD = 5.
LINK_TIMEOUT = TIMEOUT_CHECK_PERIOD * 2
LINK_LLDP_DROP = 5
def __init__(self, *args, **kwargs):
super(Switches, self).__init__(*args, **kwargs)
self.name = 'switches'
self.dps = {} # datapath_id => Datapath class
self.port_state = {} # datapath_id => ports
self.ports = PortDataState() # Port class -> PortData class
self.links = LinkState() # Link class -> timestamp
self.hosts = HostState() # mac address -> Host class list
self.is_active = True
self.link_discovery = self.CONF.observe_links
if self.link_discovery:
self.install_flow = self.CONF.install_lldp_flow
self.explicit_drop = self.CONF.explicit_drop
self.lldp_event = hub.Event()
self.link_event = hub.Event()
self.threads.append(hub.spawn(self.lldp_loop))
self.threads.append(hub.spawn(self.link_loop))
def close(self):
self.is_active = False
if self.link_discovery:
self.lldp_event.set()
self.link_event.set()
hub.joinall(self.threads)
def _register(self, dp):
assert dp.id is not None
self.dps[dp.id] = dp
if dp.id not in self.port_state:
self.port_state[dp.id] = PortState()
for port in dp.ports.values():
self.port_state[dp.id].add(port.port_no, port)
def _unregister(self, dp):
if dp.id in self.dps:
if (self.dps[dp.id] == dp):
del self.dps[dp.id]
del self.port_state[dp.id]
def _get_switch(self, dpid):
if dpid in self.dps:
switch = Switch(self.dps[dpid])
for ofpport in self.port_state[dpid].values():
switch.add_port(ofpport)
return switch
def _get_port(self, dpid, port_no):
switch = self._get_switch(dpid)
if switch:
for p in switch.ports:
if p.port_no == port_no:
return p
def _port_added(self, port):
lldp_data = LLDPPacket.lldp_packet(
port.dpid, port.port_no, port.hw_addr, self.DEFAULT_TTL)
self.ports.add_port(port, lldp_data)
# LOG.debug('_port_added dpid=%s, port_no=%s, live=%s',
# port.dpid, port.port_no, port.is_live())
def _link_down(self, port):
try:
dst, rev_link_dst = self.links.port_deleted(port)
except KeyError:
# LOG.debug('key error. src=%s, dst=%s',
# port, self.links.get_peer(port))
return
link = Link(port, dst)
self.send_event_to_observers(event.EventLinkDelete(link))
if rev_link_dst:
rev_link = Link(dst, rev_link_dst)
self.send_event_to_observers(event.EventLinkDelete(rev_link))
self.ports.move_front(dst)
def _is_edge_port(self, port):
for link in self.links:
if port == link.src or port == link.dst:
return False
return True
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def state_change_handler(self, ev):
dp = ev.datapath
assert dp is not None
LOG.debug(dp)
if ev.state == MAIN_DISPATCHER:
dp_multiple_conns = False
if dp.id in self.dps:
LOG.warning('Multiple connections from %s', dpid_to_str(dp.id))
dp_multiple_conns = True
(self.dps[dp.id]).close()
self._register(dp)
switch = self._get_switch(dp.id)
LOG.debug('register %s', switch)
if not dp_multiple_conns:
self.send_event_to_observers(event.EventSwitchEnter(switch))
else:
evt = event.EventSwitchReconnected(switch)
self.send_event_to_observers(evt)
if not self.link_discovery:
return
if self.install_flow:
ofproto = dp.ofproto
ofproto_parser = dp.ofproto_parser
# TODO:XXX need other versions
if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
rule = nx_match.ClsRule()
rule.set_dl_dst(addrconv.mac.text_to_bin(
lldp.LLDP_MAC_NEAREST_BRIDGE))
rule.set_dl_type(ETH_TYPE_LLDP)
actions = [ofproto_parser.OFPActionOutput(
ofproto.OFPP_CONTROLLER, self.LLDP_PACKET_LEN)]
dp.send_flow_mod(
rule=rule, cookie=0, command=ofproto.OFPFC_ADD,
idle_timeout=0, hard_timeout=0, actions=actions,
priority=0xFFFF)
elif ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
match = ofproto_parser.OFPMatch(
eth_type=ETH_TYPE_LLDP,
eth_dst=lldp.LLDP_MAC_NEAREST_BRIDGE)
# OFPCML_NO_BUFFER is set so that the LLDP is not
# buffered on switch
parser = ofproto_parser
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER
)]
inst = [parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(datapath=dp, match=match,
idle_timeout=0, hard_timeout=0,
instructions=inst,
priority=0xFFFF)
dp.send_msg(mod)
else:
LOG.error('cannot install flow. unsupported version. %x',
dp.ofproto.OFP_VERSION)
# Do not add ports while dp has multiple connections to controller.
if not dp_multiple_conns:
for port in switch.ports:
if not port.is_reserved():
self._port_added(port)
self.lldp_event.set()
elif ev.state == DEAD_DISPATCHER:
# dp.id is None when datapath dies before handshake
if dp.id is None:
return
switch = self._get_switch(dp.id)
if switch:
if switch.dp is dp:
self._unregister(dp)
LOG.debug('unregister %s', switch)
evt = event.EventSwitchLeave(switch)
self.send_event_to_observers(evt)
if not self.link_discovery:
return
for port in switch.ports:
if not port.is_reserved():
self.ports.del_port(port)
self._link_down(port)
self.lldp_event.set()
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def port_status_handler(self, ev):
msg = ev.msg
reason = msg.reason
dp = msg.datapath
ofpport = msg.desc
if reason == dp.ofproto.OFPPR_ADD:
# LOG.debug('A port was added.' +
# '(datapath id = %s, port number = %s)',
# dp.id, ofpport.port_no)
self.port_state[dp.id].add(ofpport.port_no, ofpport)
self.send_event_to_observers(
event.EventPortAdd(Port(dp.id, dp.ofproto, ofpport)))
if not self.link_discovery:
return
port = self._get_port(dp.id, ofpport.port_no)
if port and not port.is_reserved():
self._port_added(port)
self.lldp_event.set()
elif reason == dp.ofproto.OFPPR_DELETE:
# LOG.debug('A port was deleted.' +
# '(datapath id = %s, port number = %s)',
# dp.id, ofpport.port_no)
self.port_state[dp.id].remove(ofpport.port_no)
self.send_event_to_observers(
event.EventPortDelete(Port(dp.id, dp.ofproto, ofpport)))
if not self.link_discovery:
return
port = self._get_port(dp.id, ofpport.port_no)
if port and not port.is_reserved():
self.ports.del_port(port)
self._link_down(port)
self.lldp_event.set()
else:
assert reason == dp.ofproto.OFPPR_MODIFY
# LOG.debug('A port was modified.' +
# '(datapath id = %s, port number = %s)',
# dp.id, ofpport.port_no)
self.port_state[dp.id].modify(ofpport.port_no, ofpport)
self.send_event_to_observers(
event.EventPortModify(Port(dp.id, dp.ofproto, ofpport)))
if not self.link_discovery:
return
port = self._get_port(dp.id, ofpport.port_no)
if port and not port.is_reserved():
if self.ports.set_down(port):
self._link_down(port)
self.lldp_event.set()
@staticmethod
def _drop_packet(msg):
buffer_id = msg.buffer_id
if buffer_id == msg.datapath.ofproto.OFP_NO_BUFFER:
return
dp = msg.datapath
# TODO:XXX
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
dp.send_packet_out(buffer_id, msg.in_port, [])
elif dp.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
dp.send_packet_out(buffer_id, msg.match['in_port'], [])
else:
LOG.error('cannot drop_packet. unsupported version. %x',
dp.ofproto.OFP_VERSION)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def lldp_packet_in_handler(self, ev):
if not self.link_discovery:
return
msg = ev.msg
try:
src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data)
except LLDPPacket.LLDPUnknownFormat:
            # This handler can receive packets that are not LLDP
            # packets. Ignore them silently.
return
dst_dpid = msg.datapath.id
if msg.datapath.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
dst_port_no = msg.in_port
elif msg.datapath.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
dst_port_no = msg.match['in_port']
else:
LOG.error('cannot accept LLDP. unsupported version. %x',
msg.datapath.ofproto.OFP_VERSION)
src = self._get_port(src_dpid, src_port_no)
if not src or src.dpid == dst_dpid:
return
try:
self.ports.lldp_received(src)
except KeyError:
            # There are races between EventOFPPacketIn and
            # EventDPPortAdd. So a packet-in event can happen before
            # the port add event. In that case a key error can happen.
# LOG.debug('lldp_received error', exc_info=True)
pass
dst = self._get_port(dst_dpid, dst_port_no)
if not dst:
return
old_peer = self.links.get_peer(src)
# LOG.debug("Packet-In")
# LOG.debug(" src=%s", src)
# LOG.debug(" dst=%s", dst)
# LOG.debug(" old_peer=%s", old_peer)
if old_peer and old_peer != dst:
old_link = Link(src, old_peer)
del self.links[old_link]
self.send_event_to_observers(event.EventLinkDelete(old_link))
link = Link(src, dst)
if link not in self.links:
self.send_event_to_observers(event.EventLinkAdd(link))
        # remove hosts that are no longer attached to an edge port
host_to_del = []
for host in self.hosts.values():
if not self._is_edge_port(host.port):
host_to_del.append(host.mac)
for host_mac in host_to_del:
del self.hosts[host_mac]
if not self.links.update_link(src, dst):
# reverse link is not detected yet.
# So schedule the check early because it's very likely it's up
self.ports.move_front(dst)
self.lldp_event.set()
if self.explicit_drop:
self._drop_packet(msg)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def host_discovery_packet_in_handler(self, ev):
msg = ev.msg
eth, pkt_type, pkt_data = ethernet.ethernet.parser(msg.data)
# ignore lldp and cfm packets
if eth.ethertype in (ETH_TYPE_LLDP, ETH_TYPE_CFM):
return
datapath = msg.datapath
dpid = datapath.id
port_no = -1
if msg.datapath.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
port_no = msg.in_port
else:
port_no = msg.match['in_port']
port = self._get_port(dpid, port_no)
        # can't find this port (e.g. a logical port)
if not port:
return
# ignore switch-to-switch port
if not self._is_edge_port(port):
return
host_mac = eth.src
host = Host(host_mac, port)
if host_mac not in self.hosts:
self.hosts.add(host)
ev = event.EventHostAdd(host)
self.send_event_to_observers(ev)
# arp packet, update ip address
if eth.ethertype == ether_types.ETH_TYPE_ARP:
arp_pkt, _, _ = pkt_type.parser(pkt_data)
self.hosts.update_ip(host, ip_v4=arp_pkt.src_ip)
# ipv4 packet, update ipv4 address
elif eth.ethertype == ether_types.ETH_TYPE_IP:
ipv4_pkt, _, _ = pkt_type.parser(pkt_data)
self.hosts.update_ip(host, ip_v4=ipv4_pkt.src)
# ipv6 packet, update ipv6 address
elif eth.ethertype == ether_types.ETH_TYPE_IPV6:
# TODO: need to handle NDP
ipv6_pkt, _, _ = pkt_type.parser(pkt_data)
self.hosts.update_ip(host, ip_v6=ipv6_pkt.src)
def send_lldp_packet(self, port):
try:
port_data = self.ports.lldp_sent(port)
except KeyError:
# ports can be modified during our sleep in self.lldp_loop()
# LOG.debug('send_lld error', exc_info=True)
return
if port_data.is_down:
return
dp = self.dps.get(port.dpid, None)
if dp is None:
# datapath was already deleted
return
# LOG.debug('lldp sent dpid=%s, port_no=%d', dp.id, port.port_no)
# TODO:XXX
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
actions = [dp.ofproto_parser.OFPActionOutput(port.port_no)]
dp.send_packet_out(actions=actions, data=port_data.lldp_data)
elif dp.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
actions = [dp.ofproto_parser.OFPActionOutput(port.port_no)]
out = dp.ofproto_parser.OFPPacketOut(
datapath=dp, in_port=dp.ofproto.OFPP_CONTROLLER,
buffer_id=dp.ofproto.OFP_NO_BUFFER, actions=actions,
data=port_data.lldp_data)
dp.send_msg(out)
else:
LOG.error('cannot send lldp packet. unsupported version. %x',
dp.ofproto.OFP_VERSION)
def lldp_loop(self):
while self.is_active:
self.lldp_event.clear()
now = time.time()
timeout = None
ports_now = []
ports = []
for (key, data) in self.ports.items():
if data.timestamp is None:
ports_now.append(key)
continue
expire = data.timestamp + self.LLDP_SEND_PERIOD_PER_PORT
if expire <= now:
ports.append(key)
continue
timeout = expire - now
break
for port in ports_now:
self.send_lldp_packet(port)
for port in ports:
self.send_lldp_packet(port)
hub.sleep(self.LLDP_SEND_GUARD) # don't burst
if timeout is not None and ports:
timeout = 0 # We have already slept
# LOG.debug('lldp sleep %s', timeout)
self.lldp_event.wait(timeout=timeout)
def link_loop(self):
while self.is_active:
self.link_event.clear()
now = time.time()
deleted = []
for (link, timestamp) in self.links.items():
# LOG.debug('%s timestamp %d (now %d)', link, timestamp, now)
if timestamp + self.LINK_TIMEOUT < now:
src = link.src
if src in self.ports:
port_data = self.ports.get_port(src)
# LOG.debug('port_data %s', port_data)
if port_data.lldp_dropped() > self.LINK_LLDP_DROP:
deleted.append(link)
for link in deleted:
self.links.link_down(link)
# LOG.debug('delete %s', link)
self.send_event_to_observers(event.EventLinkDelete(link))
dst = link.dst
rev_link = Link(dst, link.src)
if rev_link not in deleted:
# It is very likely that the reverse link is also
# disconnected. Check it early.
expire = now - self.LINK_TIMEOUT
self.links.rev_link_set_timestamp(rev_link, expire)
if dst in self.ports:
self.ports.move_front(dst)
self.lldp_event.set()
self.link_event.wait(timeout=self.TIMEOUT_CHECK_PERIOD)
@set_ev_cls(event.EventSwitchRequest)
def switch_request_handler(self, req):
# LOG.debug(req)
dpid = req.dpid
switches = []
if dpid is None:
# reply all list
for dp in self.dps.values():
switches.append(self._get_switch(dp.id))
elif dpid in self.dps:
switches.append(self._get_switch(dpid))
rep = event.EventSwitchReply(req.src, switches)
self.reply_to_request(req, rep)
@set_ev_cls(event.EventLinkRequest)
def link_request_handler(self, req):
# LOG.debug(req)
dpid = req.dpid
if dpid is None:
links = self.links
else:
links = [link for link in self.links if link.src.dpid == dpid]
rep = event.EventLinkReply(req.src, dpid, links)
self.reply_to_request(req, rep)
@set_ev_cls(event.EventHostRequest)
def host_request_handler(self, req):
dpid = req.dpid
hosts = []
if dpid is None:
for mac in self.hosts:
hosts.append(self.hosts[mac])
else:
hosts = self.hosts.get_by_dpid(dpid)
rep = event.EventHostReply(req.src, dpid, hosts)
self.reply_to_request(req, rep)
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from canvas.models import FollowCategory
import itertools
class Migration(DataMigration):
def forwards(self, orm):
# Delete dupes.
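        # The raw query yields one representative row per (user, category)
        # pair that appears more than once; every other FollowCategory row
        # for that pair is deleted below, keeping the representative.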
have_dupes = FollowCategory.objects.raw('select id, user_id, category_id, count(*) from canvas_followcategory group by user_id, category_id having (COUNT(*) > 1)')
dupes = list(itertools.chain.from_iterable(FollowCategory.objects.filter(user=e.user, category=e.category).exclude(id=e.id) for e in have_dupes))
have_dupes_pks = [e.pk for e in have_dupes]
for dupe in dupes:
# Make sure we're not deleting the original one.
assert(dupe.pk not in have_dupes_pks)
dupe.delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.canvasanonymoususer': {
'Meta': {'object_name': 'CanvasAnonymousUser', '_ormbases': ['canvas.CanvasUser']},
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'canvas.canvasuser': {
'Meta': {'object_name': 'CanvasUser', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'allow_textonlyop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'disable_remix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
    Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
    Endpoints that accept `page` and `per_page` parameters are paginated. They expose
    four additional response headers that describe your position in the result set,
    namely `Total`, `Per-Page`, `Link`, and `Page`, as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
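    As an illustration, here is a minimal Python sketch (assuming the `requests`
    library and the placeholder key from above) that fetches one page and reads
    the pagination headers:
    ```
    import requests

    resp = requests.get(
        'https://api.vericred.com/networks',
        headers={'Vericred-Api-Key': 'YOUR_KEY'},
        params={'page': 2, 'per_page': 5},
    )
    # Position within the result set comes back in the response headers.
    print(resp.headers.get('Total'), resp.headers.get('Per-Page'),
          resp.headers.get('Page'))
    print(resp.headers.get('Link'))
    ```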
## Sideloading
    When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
    we sideload the associated data. In this example, we would provide an Array of
    `State`s and a `state_id` for each provider. This is done primarily to reduce the
    payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
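    For example, a small Python sketch (illustrative only; `payload` is the
    decoded JSON object shown above) that joins the sideloaded `State`s back
    onto each provider:
    ```
    states_by_id = {state['id']: state for state in payload['states']}
    for provider in payload['providers']:
        provider['state'] = states_by_id[provider['state_id']]
    ```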
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
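    Using the same illustrative `requests` sketch as in the Pagination section,
    that selection is passed as an ordinary query parameter:
    ```
    resp = requests.get(
        'https://api.vericred.com/providers/12345',
        headers={'Vericred-Api-Key': 'YOUR_KEY'},
        params={'select': 'provider.name,provider.phone,states.name,states.code'},
    )
    ```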
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
      - Out-of-Network Provider: `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
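    For instance, the Hospital stay string from Example #1,
    `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`,
    is a `tiered_coverage`: two `tier`s joined by a slash, each made of a
    `tier_name`, a colon, and a `tier_coverage` consisting of two
    `simple_coverage`s (a `coverage_amount` such as `$400` or `20%` followed by
    a `coverage_condition` such as `before deductible`) joined by `then`.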
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ProviderDetails(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, city=None, email=None, gender=None, first_name=None, id=None, last_name=None, latitude=None, longitude=None, middle_name=None, network_ids=None, organization_name=None, personal_phone=None, phone=None, presentation_name=None, specialty=None, state=None, state_id=None, street_line_1=None, street_line_2=None, suffix=None, title=None, type=None, zip_code=None, npis=None, hios_ids=None):
"""
ProviderDetails - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'city': 'str',
'email': 'str',
'gender': 'str',
'first_name': 'str',
'id': 'int',
'last_name': 'str',
'latitude': 'float',
'longitude': 'float',
'middle_name': 'str',
'network_ids': 'list[int]',
'organization_name': 'str',
'personal_phone': 'str',
'phone': 'str',
'presentation_name': 'str',
'specialty': 'str',
'state': 'str',
'state_id': 'int',
'street_line_1': 'str',
'street_line_2': 'str',
'suffix': 'str',
'title': 'str',
'type': 'str',
'zip_code': 'str',
'npis': 'list[int]',
'hios_ids': 'list[str]'
}
self.attribute_map = {
'city': 'city',
'email': 'email',
'gender': 'gender',
'first_name': 'first_name',
'id': 'id',
'last_name': 'last_name',
'latitude': 'latitude',
'longitude': 'longitude',
'middle_name': 'middle_name',
'network_ids': 'network_ids',
'organization_name': 'organization_name',
'personal_phone': 'personal_phone',
'phone': 'phone',
'presentation_name': 'presentation_name',
'specialty': 'specialty',
'state': 'state',
'state_id': 'state_id',
'street_line_1': 'street_line_1',
'street_line_2': 'street_line_2',
'suffix': 'suffix',
'title': 'title',
'type': 'type',
'zip_code': 'zip_code',
'npis': 'npis',
'hios_ids': 'hios_ids'
}
self._city = city
self._email = email
self._gender = gender
self._first_name = first_name
self._id = id
self._last_name = last_name
self._latitude = latitude
self._longitude = longitude
self._middle_name = middle_name
self._network_ids = network_ids
self._organization_name = organization_name
self._personal_phone = personal_phone
self._phone = phone
self._presentation_name = presentation_name
self._specialty = specialty
self._state = state
self._state_id = state_id
self._street_line_1 = street_line_1
self._street_line_2 = street_line_2
self._suffix = suffix
self._title = title
self._type = type
self._zip_code = zip_code
self._npis = npis
self._hios_ids = hios_ids
@property
def city(self):
"""
Gets the city of this ProviderDetails.
City name (e.g. Springfield).
:return: The city of this ProviderDetails.
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""
Sets the city of this ProviderDetails.
City name (e.g. Springfield).
:param city: The city of this ProviderDetails.
:type: str
"""
self._city = city
@property
def email(self):
"""
Gets the email of this ProviderDetails.
Primary email address to contact the provider.
:return: The email of this ProviderDetails.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this ProviderDetails.
Primary email address to contact the provider.
:param email: The email of this ProviderDetails.
:type: str
"""
self._email = email
@property
def gender(self):
"""
Gets the gender of this ProviderDetails.
Provider's gender (M or F)
:return: The gender of this ProviderDetails.
:rtype: str
"""
return self._gender
@gender.setter
def gender(self, gender):
"""
Sets the gender of this ProviderDetails.
Provider's gender (M or F)
:param gender: The gender of this ProviderDetails.
:type: str
"""
self._gender = gender
@property
def first_name(self):
"""
Gets the first_name of this ProviderDetails.
Given name for the provider.
:return: The first_name of this ProviderDetails.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""
Sets the first_name of this ProviderDetails.
Given name for the provider.
:param first_name: The first_name of this ProviderDetails.
:type: str
"""
self._first_name = first_name
@property
def id(self):
"""
Gets the id of this ProviderDetails.
        National Provider Identifier (NPI) number
:return: The id of this ProviderDetails.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ProviderDetails.
        National Provider Identifier (NPI) number
:param id: The id of this ProviderDetails.
:type: int
"""
self._id = id
@property
def last_name(self):
"""
Gets the last_name of this ProviderDetails.
Family name for the provider.
:return: The last_name of this ProviderDetails.
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""
Sets the last_name of this ProviderDetails.
Family name for the provider.
:param last_name: The last_name of this ProviderDetails.
:type: str
"""
self._last_name = last_name
@property
def latitude(self):
"""
Gets the latitude of this ProviderDetails.
Latitude of provider
:return: The latitude of this ProviderDetails.
:rtype: float
"""
return self._latitude
@latitude.setter
def latitude(self, latitude):
"""
Sets the latitude of this ProviderDetails.
Latitude of provider
:param latitude: The latitude of this ProviderDetails.
:type: float
"""
self._latitude = latitude
@property
def longitude(self):
"""
Gets the longitude of this ProviderDetails.
Longitude of provider
:return: The longitude of this ProviderDetails.
:rtype: float
"""
return self._longitude
@longitude.setter
def longitude(self, longitude):
"""
Sets the longitude of this ProviderDetails.
Longitude of provider
:param longitude: The longitude of this ProviderDetails.
:type: float
"""
self._longitude = longitude
@property
def middle_name(self):
"""
Gets the middle_name of this ProviderDetails.
Middle name for the provider.
:return: The middle_name of this ProviderDetails.
:rtype: str
"""
return self._middle_name
@middle_name.setter
def middle_name(self, middle_name):
"""
Sets the middle_name of this ProviderDetails.
Middle name for the provider.
:param middle_name: The middle_name of this ProviderDetails.
:type: str
"""
self._middle_name = middle_name
@property
def network_ids(self):
"""
Gets the network_ids of this ProviderDetails.
Array of network ids
:return: The network_ids of this ProviderDetails.
:rtype: list[int]
"""
return self._network_ids
@network_ids.setter
def network_ids(self, network_ids):
"""
Sets the network_ids of this ProviderDetails.
Array of network ids
:param network_ids: The network_ids of this ProviderDetails.
:type: list[int]
"""
self._network_ids = network_ids
@property
def organization_name(self):
"""
Gets the organization_name of this ProviderDetails.
        Name for providers of type organization.
:return: The organization_name of this ProviderDetails.
:rtype: str
"""
return self._organization_name
@organization_name.setter
def organization_name(self, organization_name):
"""
Sets the organization_name of this ProviderDetails.
        Name for providers of type organization.
:param organization_name: The organization_name of this ProviderDetails.
:type: str
"""
self._organization_name = organization_name
@property
def personal_phone(self):
"""
Gets the personal_phone of this ProviderDetails.
Personal contact phone for the provider.
:return: The personal_phone of this ProviderDetails.
:rtype: str
"""
return self._personal_phone
@personal_phone.setter
def personal_phone(self, personal_phone):
"""
Sets the personal_phone of this ProviderDetails.
Personal contact phone for the provider.
:param personal_phone: The personal_phone of this ProviderDetails.
:type: str
"""
self._personal_phone = personal_phone
@property
def phone(self):
"""
Gets the phone of this ProviderDetails.
Office phone for the provider
:return: The phone of this ProviderDetails.
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""
Sets the phone of this ProviderDetails.
Office phone for the provider
:param phone: The phone of this ProviderDetails.
:type: str
"""
self._phone = phone
@property
def presentation_name(self):
"""
Gets the presentation_name of this ProviderDetails.
Preferred name for display (e.g. Dr. Francis White may prefer Dr. Frank White)
:return: The presentation_name of this ProviderDetails.
:rtype: str
"""
return self._presentation_name
@presentation_name.setter
def presentation_name(self, presentation_name):
"""
Sets the presentation_name of this ProviderDetails.
Preferred name for display (e.g. Dr. Francis White may prefer Dr. Frank White)
:param presentation_name: The presentation_name of this ProviderDetails.
:type: str
"""
self._presentation_name = presentation_name
@property
def specialty(self):
"""
Gets the specialty of this ProviderDetails.
Name of the primary Specialty
:return: The specialty of this ProviderDetails.
:rtype: str
"""
return self._specialty
@specialty.setter
def specialty(self, specialty):
"""
Sets the specialty of this ProviderDetails.
Name of the primary Specialty
:param specialty: The specialty of this ProviderDetails.
:type: str
"""
self._specialty = specialty
@property
def state(self):
"""
Gets the state of this ProviderDetails.
State code for the provider's address (e.g. NY).
:return: The state of this ProviderDetails.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this ProviderDetails.
State code for the provider's address (e.g. NY).
:param state: The state of this ProviderDetails.
:type: str
"""
self._state = state
@property
def state_id(self):
"""
Gets the state_id of this ProviderDetails.
Foreign key to States
:return: The state_id of this ProviderDetails.
:rtype: int
"""
return self._state_id
@state_id.setter
def state_id(self, state_id):
"""
Sets the state_id of this ProviderDetails.
Foreign key to States
:param state_id: The state_id of this ProviderDetails.
:type: int
"""
self._state_id = state_id
@property
def street_line_1(self):
"""
Gets the street_line_1 of this ProviderDetails.
First line of the provider's street address.
:return: The street_line_1 of this ProviderDetails.
:rtype: str
"""
return self._street_line_1
@street_line_1.setter
def street_line_1(self, street_line_1):
"""
Sets the street_line_1 of this ProviderDetails.
First line of the provider's street address.
:param street_line_1: The street_line_1 of this ProviderDetails.
:type: str
"""
self._street_line_1 = street_line_1
@property
def street_line_2(self):
"""
Gets the street_line_2 of this ProviderDetails.
Second line of the provider's street address.
:return: The street_line_2 of this ProviderDetails.
:rtype: str
"""
return self._street_line_2
@street_line_2.setter
def street_line_2(self, street_line_2):
"""
Sets the street_line_2 of this ProviderDetails.
Second line of the provider's street address.
:param street_line_2: The street_line_2 of this ProviderDetails.
:type: str
"""
self._street_line_2 = street_line_2
@property
def suffix(self):
"""
Gets the suffix of this ProviderDetails.
Suffix for the provider's name (e.g. Jr)
:return: The suffix of this ProviderDetails.
:rtype: str
"""
return self._suffix
@suffix.setter
def suffix(self, suffix):
"""
Sets the suffix of this ProviderDetails.
Suffix for the provider's name (e.g. Jr)
:param suffix: The suffix of this ProviderDetails.
:type: str
"""
self._suffix = suffix
@property
def title(self):
"""
Gets the title of this ProviderDetails.
Professional title for the provider (e.g. Dr).
:return: The title of this ProviderDetails.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this ProviderDetails.
Professional title for the provider (e.g. Dr).
:param title: The title of this ProviderDetails.
:type: str
"""
self._title = title
@property
def type(self):
"""
Gets the type of this ProviderDetails.
Type of NPI number (individual provider vs organization).
:return: The type of this ProviderDetails.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ProviderDetails.
Type of NPI number (individual provider vs organization).
:param type: The type of this ProviderDetails.
:type: str
"""
self._type = type
@property
def zip_code(self):
"""
Gets the zip_code of this ProviderDetails.
Postal code for the provider's address (e.g. 11215)
:return: The zip_code of this ProviderDetails.
:rtype: str
"""
return self._zip_code
@zip_code.setter
def zip_code(self, zip_code):
"""
Sets the zip_code of this ProviderDetails.
Postal code for the provider's address (e.g. 11215)
:param zip_code: The zip_code of this ProviderDetails.
:type: str
"""
self._zip_code = zip_code
@property
def npis(self):
"""
Gets the npis of this ProviderDetails.
        The National Provider Identifier (NPI) numbers associated with this provider.
:return: The npis of this ProviderDetails.
:rtype: list[int]
"""
return self._npis
@npis.setter
def npis(self, npis):
"""
Sets the npis of this ProviderDetails.
        The National Provider Identifier (NPI) numbers associated with this provider.
:param npis: The npis of this ProviderDetails.
:type: list[int]
"""
self._npis = npis
@property
def hios_ids(self):
"""
Gets the hios_ids of this ProviderDetails.
List of HIOS ids for this provider
:return: The hios_ids of this ProviderDetails.
:rtype: list[str]
"""
return self._hios_ids
@hios_ids.setter
def hios_ids(self, hios_ids):
"""
Sets the hios_ids of this ProviderDetails.
List of HIOS ids for this provider
:param hios_ids: The hios_ids of this ProviderDetails.
:type: list[str]
"""
self._hios_ids = hios_ids
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
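# Illustrative usage sketch (not part of the generated model):
#
#     details = ProviderDetails(first_name='John', last_name='Doe',
#                               state='NY', zip_code='11215')
#     details.phone = '1234567890'
#     print(details.to_dict())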
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __builtin__
import datetime
import hashlib
import os
import paramiko
import StringIO
import tempfile
import uuid
import mox
from oslo.config import cfg
import cinder
from cinder import exception
from cinder.openstack.common import timeutils
from cinder import test
from cinder import utils
CONF = cfg.CONF
class ExecuteTestCase(test.TestCase):
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
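        # The helper script below always exits non-zero, so utils.execute
        # retries it up to `attempts` times; the second temp file counts the
        # runs and records whether stdin ('foo') was passed on every attempt.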
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
fp.close()
os.chmod(tmpfilename, 0o755)
self.assertRaises(exception.ProcessExecutionError,
utils.execute,
tmpfilename, tmpfilename2, attempts=10,
process_input='foo',
delay_on_retry=False)
fp = open(tmpfilename2, 'r+')
runs = fp.read()
fp.close()
self.assertNotEquals(runs.strip(), 'failure', 'stdin did not '
'always get passed '
'correctly')
runs = int(runs.strip())
self.assertEquals(runs, 10,
'Ran %d times instead of 10.' % (runs,))
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
def test_unknown_kwargs_raises_error(self):
self.assertRaises(exception.Error,
utils.execute,
'/usr/bin/env', 'true',
this_is_not_a_valid_kwarg=True)
def test_check_exit_code_boolean(self):
utils.execute('/usr/bin/env', 'false', check_exit_code=False)
self.assertRaises(exception.ProcessExecutionError,
utils.execute,
'/usr/bin/env', 'false', check_exit_code=True)
def test_no_retry_on_success(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
fp.close()
os.chmod(tmpfilename, 0o755)
utils.execute(tmpfilename,
tmpfilename2,
process_input='foo',
attempts=2)
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
class GetFromPathTestCase(test.TestCase):
def test_tolerates_nones(self):
f = utils.get_from_path
input = []
self.assertEquals([], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [None]
self.assertEquals([], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': None}]
self.assertEquals([], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': None}}]
self.assertEquals([{'b': None}], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}]
self.assertEquals([{'b': {'c': None}}], f(input, "a"))
self.assertEquals([{'c': None}], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': None}]
self.assertEquals([{'b': {'c': None}}], f(input, "a"))
self.assertEquals([{'c': None}], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a"))
self.assertEquals([{'c': None}], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
def test_does_select(self):
f = utils.get_from_path
input = [{'a': 'a_1'}]
self.assertEquals(['a_1'], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': 'b_1'}}]
self.assertEquals([{'b': 'b_1'}], f(input, "a"))
self.assertEquals(['b_1'], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}]
self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
self.assertEquals(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
self.assertEquals(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': None}}]
self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
self.assertEquals(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': {'c': 'c_2'}}}]
self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
f(input, "a"))
self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c"))
self.assertEquals([], f(input, "a/b/c/d"))
self.assertEquals([], f(input, "c/a/b/d"))
self.assertEquals([], f(input, "i/r/t"))
def test_flattens_lists(self):
f = utils.get_from_path
input = [{'a': [1, 2, 3]}]
self.assertEquals([1, 2, 3], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}]
self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEquals([1, 2, 3], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': [1, 2, {'b': 'b_1'}]}]
self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEquals(['b_1'], f(input, "a/b"))
def test_bad_xpath(self):
f = utils.get_from_path
self.assertRaises(exception.Error, f, [], None)
self.assertRaises(exception.Error, f, [], "")
self.assertRaises(exception.Error, f, [], "/")
self.assertRaises(exception.Error, f, [], "/a")
self.assertRaises(exception.Error, f, [], "/a/")
self.assertRaises(exception.Error, f, [], "//")
self.assertRaises(exception.Error, f, [], "//a")
self.assertRaises(exception.Error, f, [], "a//a")
self.assertRaises(exception.Error, f, [], "a//a/")
self.assertRaises(exception.Error, f, [], "a/a/")
def test_real_failure1(self):
# Real world failure case...
# We weren't coping when the input was a Dictionary instead of a List
# This led to test_accepts_dictionaries
f = utils.get_from_path
inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
'address': '192.168.0.3'},
'hostname': ''}
private_ips = f(inst, 'fixed_ip/address')
public_ips = f(inst, 'fixed_ip/floating_ips/address')
self.assertEquals(['192.168.0.3'], private_ips)
self.assertEquals(['1.2.3.4'], public_ips)
def test_accepts_dictionaries(self):
f = utils.get_from_path
input = {'a': [1, 2, 3]}
self.assertEquals([1, 2, 3], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = {'a': {'b': [1, 2, 3]}}
self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEquals([1, 2, 3], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = {'a': [1, 2, {'b': 'b_1'}]}
self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEquals(['b_1'], f(input, "a/b"))
class GenericUtilsTestCase(test.TestCase):
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_generate_glance_url(self):
generated_url = utils.generate_glance_url()
actual_url = "http://%s:%d" % (CONF.glance_host,
CONF.glance_port)
self.assertEqual(generated_url, actual_url)
def test_read_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
self.mox.ReplayAll()
cache_data = {"data": 1123, "mtime": 1}
data = utils.read_cached_file("/this/is/a/fake", cache_data)
self.assertEqual(cache_data["data"], data)
def test_read_modified_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
self.mox.StubOutWithMock(__builtin__, 'open')
os.path.getmtime(mox.IgnoreArg()).AndReturn(2)
fake_contents = "lorem ipsum"
fake_file = self.mox.CreateMockAnything()
fake_file.read().AndReturn(fake_contents)
fake_context_manager = self.mox.CreateMockAnything()
fake_context_manager.__enter__().AndReturn(fake_file)
fake_context_manager.__exit__(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
__builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)
self.mox.ReplayAll()
cache_data = {"data": 1123, "mtime": 1}
self.reload_called = False
def test_reload(reloaded_data):
self.assertEqual(reloaded_data, fake_contents)
self.reload_called = True
data = utils.read_cached_file("/this/is/a/fake",
cache_data,
reload_func=test_reload)
self.assertEqual(data, fake_contents)
self.assertTrue(self.reload_called)
def test_generate_password(self):
password = utils.generate_password()
self.assertTrue([c for c in password if c in '0123456789'])
self.assertTrue([c for c in password
if c in 'abcdefghijklmnopqrstuvwxyz'])
self.assertTrue([c for c in password
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
def test_read_file_as_root(self):
def fake_execute(*args, **kwargs):
if args[1] == 'bad':
raise exception.ProcessExecutionError
return 'fakecontents', None
self.stubs.Set(utils, 'execute', fake_execute)
contents = utils.read_file_as_root('good')
self.assertEqual(contents, 'fakecontents')
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root, 'bad')
def test_strcmp_const_time(self):
self.assertTrue(utils.strcmp_const_time('abc123', 'abc123'))
self.assertFalse(utils.strcmp_const_time('a', 'aaaaa'))
self.assertFalse(utils.strcmp_const_time('ABC123', 'abc123'))
def test_temporary_chown(self):
def fake_execute(*args, **kwargs):
if args[0] == 'chown':
fake_execute.uid = args[1]
self.stubs.Set(utils, 'execute', fake_execute)
with tempfile.NamedTemporaryFile() as f:
with utils.temporary_chown(f.name, owner_uid=2):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
def test_service_is_up(self):
fts_func = datetime.datetime.fromtimestamp
fake_now = 1000
down_time = 5
self.flags(service_down_time=down_time)
self.mox.StubOutWithMock(timeutils, 'utcnow')
# Up (equal)
timeutils.utcnow().AndReturn(fts_func(fake_now))
service = {'updated_at': fts_func(fake_now - down_time),
'created_at': fts_func(fake_now - down_time)}
self.mox.ReplayAll()
result = utils.service_is_up(service)
self.assertTrue(result)
self.mox.ResetAll()
# Up
timeutils.utcnow().AndReturn(fts_func(fake_now))
service = {'updated_at': fts_func(fake_now - down_time + 1),
'created_at': fts_func(fake_now - down_time + 1)}
self.mox.ReplayAll()
result = utils.service_is_up(service)
self.assertTrue(result)
self.mox.ResetAll()
# Down
timeutils.utcnow().AndReturn(fts_func(fake_now))
service = {'updated_at': fts_func(fake_now - down_time - 1),
'created_at': fts_func(fake_now - down_time - 1)}
self.mox.ReplayAll()
result = utils.service_is_up(service)
self.assertFalse(result)
def test_safe_parse_xml(self):
normal_body = ('<?xml version="1.0" ?>'
'<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
def killer_body():
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
dom = utils.safe_minidom_parse_string(normal_body)
# Some versions of minidom inject extra newlines so we ignore them
result = str(dom.toxml()).replace('\n', '')
self.assertEqual(normal_body, result)
self.assertRaises(ValueError,
utils.safe_minidom_parse_string,
killer_body())
def test_xhtml_escape(self):
self.assertEqual('"foo"', utils.xhtml_escape('"foo"'))
self.assertEqual(''foo'', utils.xhtml_escape("'foo'"))
def test_hash_file(self):
data = 'Mary had a little lamb, its fleece as white as snow'
flo = StringIO.StringIO(data)
h1 = utils.hash_file(flo)
h2 = hashlib.sha1(data).hexdigest()
self.assertEquals(h1, h2)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'cinder.tests.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package
+ 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
cinder.tests.monkey_patch_example.CALLED_FUNCTION = []
from cinder.tests.monkey_patch_example import example_a
from cinder.tests.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(ret_a, 8)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(package_a + 'example_function_a'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(package_b + 'example_function_b'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
class AuditPeriodTest(test.TestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
        # a fairly random time to test with
self.test_time = datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)
timeutils.set_time_override(override_time=self.test_time)
def tearDown(self):
timeutils.clear_time_override()
super(AuditPeriodTest, self).tearDown()
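# last_completed_audit_period() accepts an optional offset after '@':
# 'hour@10' starts each period at minute 10, 'day@6' at 06:00, 'month@15'
# on the 15th, 'year@6' in June. If the override time above has not yet
# reached the offset within the current unit, the period one step earlier
# is returned (see the *_with_offset_after_current tests below).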
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEquals(begin,
datetime.datetime(hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEquals(begin, datetime.datetime(minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEquals(begin, datetime.datetime(minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEquals(begin, datetime.datetime(day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEquals(begin, datetime.datetime(hour=6,
day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEquals(begin, datetime.datetime(hour=10,
day=3,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEquals(begin, datetime.datetime(day=1,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEquals(begin, datetime.datetime(day=2,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEquals(begin, datetime.datetime(day=15,
month=1,
year=2012))
self.assertEquals(end, datetime.datetime(day=15,
month=2,
year=2012))
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEquals(begin, datetime.datetime(day=1,
month=1,
year=2011))
self.assertEquals(end, datetime.datetime(day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEquals(begin, datetime.datetime(day=1,
month=2,
year=2011))
self.assertEquals(end, datetime.datetime(day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEquals(begin, datetime.datetime(day=1,
month=6,
year=2010))
self.assertEquals(end, datetime.datetime(day=1,
month=6,
year=2011))
class FakeSSHClient(object):
def __init__(self):
self.id = uuid.uuid4()
self.transport = FakeTransport()
def set_missing_host_key_policy(self, policy):
pass
def connect(self, ip, port=22, username=None, password=None,
pkey=None, timeout=10):
pass
def get_transport(self):
return self.transport
def close(self):
pass
def __call__(self, *args, **kwargs):
pass
class FakeSock(object):
def settimeout(self, timeout):
pass
class FakeTransport(object):
def __init__(self):
self.active = True
self.sock = FakeSock()
def set_keepalive(self, timeout):
pass
def is_active(self):
return self.active
class SSHPoolTestCase(test.TestCase):
"""Unit test for SSH Connection Pool."""
def setup(self):
self.mox.StubOutWithMock(paramiko, "SSHClient")
paramiko.SSHClient().AndReturn(FakeSSHClient())
self.mox.ReplayAll()
def test_single_ssh_connect(self):
self.setup()
sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test",
min_size=1, max_size=1)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
self.assertEqual(first_id, second_id)
def test_closed_reopened_ssh_connections(self):
self.setup()
sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test",
min_size=1, max_size=2)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
# Close the connection and test for a new connection
ssh.get_transport().active = False
self.assertEqual(first_id, second_id)
# The mox stubs are not set up again for a new pool connection,
# so we have to reset them and stub them out again.
self.mox.UnsetStubs()
self.setup()
with sshpool.item() as ssh:
third_id = ssh.id
self.assertNotEqual(first_id, third_id)
|
|
import urllib2
import urllib
import urlparse
import json
import mimetypes
import mimetools
class MapLargeConnector(object):
"""
Creates a connection to a MapLarge API server.
When NO_WEB_CALLS is True, all MapLargeConnectors will make no remote
calls; instead, the response will be the full URL that would have been
invoked.
"""
NO_WEB_CALLS = False
__user = None
__token = None
__apiserver = None
__authstring = ""
__password = None
__defaultHeaders = [('User-agent', 'MapLarge SDK Python')]
def __init__(self, server, user, passwordOrToken=None):
"""
Constructor. Creates a connection to a MapLarge API server with a
username and a password or token as credentials.
@param server: URL of API server. Must begin with a valid protocol.
@param user: Username to use for connection credentials.
@param passwordOrToken: Authentication token (int) or password (str) to use for connection credentials.
@raise ValueError: If neither a password nor a token is supplied.
"""
try:
self.__apiserver = server
self.__user = user
if isinstance(passwordOrToken, int):
self.__token = str(passwordOrToken)
elif isinstance(passwordOrToken, str):
self.__token = self.__GetToken(passwordOrToken)
else:
raise ValueError("No Password or Token")
self.__authstring = "mluser=" + self.__user + "&mltoken=" + self.__token
except Exception:
raise
def __GetToken(self, password):
try:
querystring = urllib.urlencode({"mluser": self.__user, "mlpass": password}) + "&" + self.__authstring
if self.NO_WEB_CALLS:
retVal = querystring
else:
response = self.__InvokeURL("Auth", "Login", querystring)
respObj = json.loads(response)
success = respObj['success']
if success == True:
retVal = respObj['token']
else:
retVal = "ERROR"
except Exception as e:
print e
retVal = "ERROR"
return retVal
def __InvokeURL(self, controller, actionname, params):
try:
urlstring = urlparse.urljoin(self.__apiserver, controller + "/" + actionname)
urlstring = urlstring + '?' + params
if self.NO_WEB_CALLS:
retVal = urlstring
else:
flob = urllib2.urlopen(urllib2.Request(urlstring))
retVal = flob.read()
except Exception as e:
print e
retVal = "ERROR"
return retVal
def __InvokePostURL(self, controller, actionname, params, filepaths):
try:
urlstring = urlparse.urljoin(self.__apiserver, controller + "/" + actionname)
if self.NO_WEB_CALLS:
retVal = urlstring
else:
part_boundary = '--' + mimetools.choose_boundary()
CRLF = '\r\n'
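# Build a multipart/form-data body by hand: one part per form field and
# one part per uploaded file, each introduced by the boundary line, with a
# closing "--boundary--" line at the end.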
L = []
for (key, val) in params.items():
L.append('--' + part_boundary)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(val)
files = []
for fileName in filepaths:
fileBody = open(fileName, "rb").read()
L.append('--' + part_boundary)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % ('fileUpload', fileName))
fileContentType = mimetypes.guess_type(fileName)[0] or 'application/octet-stream'
L.append('Content-Type: %s' % fileContentType)
L.append('')
L.append(fileBody)
L.append('--' + part_boundary + '--')
L.append('')
#files = {'file': {'filename': 'F.DAT', 'content': 'DATA HERE'}}
postData = body = CRLF.join(L)
request = urllib2.Request(urlstring)
for name, value in self.__defaultHeaders:
request.add_header(name, value)
# conn.ContentType = "multipart/form-data; boundary=" + boundary;
request.add_header('Content-type', 'multipart/form-data; boundary=%s' % part_boundary)
request.add_header('Content-length', len(postData))
request.add_data(postData)
resp = urllib2.urlopen(request)
retVal = resp.read()
except Exception as e:
print e
retVal = "ERROR"
return retVal
def InvokeAPIRequestPost(self, action, params, filepaths=None):
"""
@param action: Name of API action being called.
@param params: dict of key value pairs.
@param filepaths: List of filename(s) to upload. Do not pass if not required.
@return: API response, usually a JSON formatted string. Returns "ERROR" on exception.
"""
try:
params["mluser"] = self.__user;
params["mltoken"] = self.__token;
if (filepaths == None):
retval = self.__InvokePostURL("Remote", action, params, [])
else:
retval = self.__InvokePostURL("Remote", action, params, filepaths)
except Exception as e:
print e
retval = "ERROR"
return retval
def InvokeAPIRequest(self, action, params, filepaths=None):
"""
@param action: Name of API action being called.
@param params: dict of key value pairs.
@return: API response, usually a JSON formatted string. Returns "ERROR" on exception.
"""
try:
querystring = urllib.urlencode(params) + "&" + self.__authstring
retVal = self.__InvokeURL("Remote", action, querystring)
except Exception as e:
print e
retVal = "ERROR"
return retVal
def GetRemoteAuthToken(self, user, password, ipaddress):
"""
@param user: Username to create authentication token for
@param password: Password for supplied username
@param ipaddress: IP address of the user for whom you want to build an authentication token
@return: The authentication token in String form.
"""
try:
retVal = self.__InvokeURL("Auth", "RemoteLogin",
"mluser=" + user + "&mlpass=" + password + "&remoteIP=" + ipaddress);
except Exception as e:
print e
retVal = "ERROR"
return retVal
##DEFAULT CREDENTIALS
server = "http://server.maplarge.com/"
user = "[email protected]"
pw = "pw123456"
token = 123456789
#CREATE MAPLARGE CONNECTION WITH USER / PASSWORD
mlconnPassword = MapLargeConnector(server, user, pw)
#CREATE MAPLARGE CONNECTION WITH USER / AUTH TOKEN
mlconnToken = MapLargeConnector(server, user, token)
#CREATE TABLE SYNCHRONOUS (NO WEB CALL)
params = {"account": "aidsvualpha", "tablename": "testPythonSdkTable", "fileurl": "http://localhost/testfile.csv"}
mlconnToken.NO_WEB_CALLS = True
response = mlconnToken.InvokeAPIRequest("CreateTableSynchronous", params)
print response
mlconnPassword.NO_WEB_CALLS = False
#RETRIEVE REMOTE USER AUTH TOKEN
response = mlconnPassword.GetRemoteAuthToken(user, pw, "255.255.255.255")
print response
#List Groups
params = {"account": "aidsvualpha"}
response = mlconnPassword.InvokeAPIRequestPost("ListGroups", params)
print response
#CREATE TABLE WITH FILES SYNCHRONOUS
params = {"account": "aidsvualpha", "tablename": "testTable"}
fileList = ["c:\\temp\\usa.csv"]
print mlconnPassword.InvokeAPIRequestPost("CreateTableWithFilesSynchronous", params, fileList)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to kill any leftover test processes, executed by buildbot.
Only works on Windows."""
import optparse
import os
import re
import subprocess
import sys
import time
def Log(message):
print '%s: %s' % (time.asctime(), message)
def KillAll(process_names, must_die=True):
"""Tries to kill all copies of each process in the processes list."""
killed_processes = []
Log('killing all processes in %r' % process_names)
for process_name in process_names:
if ProcessExists(process_name):
Kill(process_name)
killed_processes.append(process_name)
Log('process killed: %r' % killed_processes)
# If we allow any processes to continue after trying to kill them, return
# now.
if not must_die:
return True
# Give our processes time to exit.
for _ in range(60):
if not AnyProcessExists(killed_processes):
break
time.sleep(1)
# We require that all processes we tried to kill must be killed. Let's
# verify that.
return not AnyProcessExists(killed_processes)
def ProcessExists(process_name):
"""Return whether process_name is found in tasklist output."""
# Use tasklist.exe to find if a given process_name is running.
Log('checking for process name %s' % process_name)
command = ('tasklist.exe /fi "imagename eq %s" | findstr.exe "K"' %
process_name)
# findstr.exe exits with code 0 if the given string is found.
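# Matching on "K" relies on each tasklist process row ending with a
# memory-usage column such as "12,345 K"; the "INFO: No tasks are
# running..." message printed when nothing matches contains no upper-case K.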
return os.system(command) == 0
def ProcessExistsByPid(pid):
"""Return whether pid is found in tasklist output."""
Log('checking for process id %d' % pid)
# Use tasklist.exe to find if a given process_name is running.
command = ('tasklist.exe /fi "pid eq %d" | findstr.exe "K"' %
pid)
# findstr.exe exits with code 0 if the given string is found.
return os.system(command) == 0
def AnyProcessExists(process_list):
"""Return whether any process from the list is still running."""
return any(ProcessExists(process) for process in process_list)
def AnyProcessExistsByPid(pid_list):
"""Return whether any process from the list is still running."""
return any(ProcessExistsByPid(pid) for pid in pid_list)
def Kill(process_name):
command = ['taskkill.exe', '/f', '/t', '/im']
subprocess.call(command + [process_name])
def KillByPid(pid):
command = ['taskkill.exe', '/f', '/t', '/pid']
subprocess.call(command + [str(pid)])
def KillProcessesUsingCurrentDirectory(handle_exe):
if not os.path.exists(handle_exe):
return False
try:
Log('running %s to look for running processes' % handle_exe)
handle = subprocess.Popen([handle_exe,
os.path.join(os.getcwd(), 'src'),
'/accepteula'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except WindowsError as e:  # pylint: disable=E0602
print e
return False
stdout, stderr = handle.communicate()
# Do a basic sanity check to make sure the tool is working fine.
if stderr or ('.exe' not in stdout and
'Non-existant Process' not in stdout and
'No matching handles found' not in stdout):
Log('Error running handle.exe: ' + repr((stdout, stderr)))
return False
pid_list = []
for line in stdout.splitlines(): # pylint: disable=E1103
# Killing explorer.exe would hose the bot, don't do that.
if 'explorer.exe' in line:
continue
if '.exe' in line:
match = re.match(r'.*pid: (\d+).*', line)
if match:
pid = int(match.group(1))
# Do not kill self.
if int(pid) == int(os.getpid()):
continue
Log('Killing: ' + line)
pid_list.append(pid)
KillByPid(pid)
# Give our processes time to exit.
for _ in range(60):
if not AnyProcessExistsByPid(pid_list):
break
time.sleep(1)
return True
# rdpclip.exe is part of Remote Desktop. It has a bug that sometimes causes
# it to keep the clipboard open forever, denying other processes access to it.
# Killing BuildConsole.exe usually stops an IB build within a few seconds.
# Unfortunately, killing devenv.com or devenv.exe doesn't stop a VS build, so
# we don't bother pretending.
processes = [
# Utilities we don't build, but which we use or otherwise can't
# have hanging around.
'BuildConsole.exe',
'httpd.exe',
'lighttpd.exe',
'outlook.exe',
'perl.exe',
'python_slave.exe',
'rdpclip.exe',
'svn.exe',
# These processes are spawned by some tests and should be killed by same.
# It may occur that they are left dangling if a test crashes, so we kill
# them here too.
'firefox.exe',
'iexplore.exe',
#'ieuser.exe',
'acrord32.exe',
# The JIT debugger may start when devenv.exe crashes.
'vsjitdebugger.exe',
# This process is also crashing once in a while during compile.
'midlc.exe',
# goma compiler proxy.
'compiler_proxy.exe',
# Things built by/for Chromium.
'app_list_unittests.exe',
'base_unittests.exe',
'browser_tests.exe',
'cacheinvalidation_unittests.exe',
'chrome.exe',
'chromedriver.exe',
'chromedriver_tests.exe',
'chromedriver_unittests.exe',
'chrome_launcher.exe',
'content_shell.exe',
'content_shell_crash_service.exe',
'content_unittests.exe',
'crypto_unittests.exe',
'debug_message.exe',
'device_unittests.exe',
'DumpRenderTree.exe',
'flush_cache.exe',
'gl_tests.exe',
'ie_unittests.exe',
'image_diff.exe',
'installer_util_unittests.exe',
'interactive_ui_tests.exe',
'ipc_tests.exe',
'jingle_unittests.exe',
'mediumtest_ie.exe',
'memory_test.exe',
'nacl64.exe',
'net_unittests.exe',
'page_cycler_tests.exe',
'peerconnection_server.exe',
'perf_tests.exe',
'printing_unittests.exe',
'sel_ldr.exe',
'sel_ldr64.exe',
'selenium_tests.exe',
'startup_tests.exe',
'sync_integration_tests.exe',
'tab_switching_test.exe',
'tld_cleanup.exe',
'unit_tests.exe',
'v8_shell.exe',
'v8_mksnapshot.exe',
'v8_shell_sample.exe',
'wow_helper.exe',
]
# Some processes may be present occasionally unrelated to the current build.
# For these, it's not an error if we attempt to kill them and they don't go
# away.
lingering_processes = [
# When VC crashes during compilation, this process, which manages the .pdb
# file generation, sometimes hangs. However, Incredibuild will spawn
# mspdbsrv.exe, so don't trigger an error if it is still present after
# we attempt to kill it.
'mspdbsrv.exe',
]
def main():
handle_exe_default = os.path.join(os.getcwd(), '..', '..', '..',
'third_party', 'psutils', 'handle.exe')
parser = optparse.OptionParser(
usage='%prog [options]')
parser.add_option('--handle_exe', default=handle_exe_default,
help='The path to handle.exe. Defaults to %default.')
(options, args) = parser.parse_args()
if args:
parser.error('Unknown arguments passed in, %s' % args)
# Kill all lingering processes. It's okay if these aren't killed or end up
# reappearing.
KillAll(lingering_processes, must_die=False)
KillProcessesUsingCurrentDirectory(options.handle_exe)
# Kill all regular processes. We must guarantee that these are killed since
# we exit with an error code if they're not.
if KillAll(processes, must_die=True):
return 0
# Some processes were not killed, exit with non-zero status.
return 1
if '__main__' == __name__:
sys.exit(main())
|
|
from django.db import models
from django.urls import reverse
from taggit.managers import TaggableManager
from dcim.fields import ASNField
from dcim.models import CableTermination, PathEndpoint
from extras.models import ChangeLoggedModel, CustomFieldModel, ObjectChange, TaggedItem
from extras.utils import extras_features
from utilities.querysets import RestrictedQuerySet
from utilities.utils import serialize_object
from .choices import *
from .querysets import CircuitQuerySet
__all__ = (
'Circuit',
'CircuitTermination',
'CircuitType',
'Provider',
)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class Provider(ChangeLoggedModel, CustomFieldModel):
"""
Each Circuit belongs to a Provider. This is usually a telecommunications company or similar organization. This model
stores information pertinent to the user's relationship with the Provider.
"""
name = models.CharField(
max_length=100,
unique=True
)
slug = models.SlugField(
max_length=100,
unique=True
)
asn = ASNField(
blank=True,
null=True,
verbose_name='ASN',
help_text='32-bit autonomous system number'
)
account = models.CharField(
max_length=30,
blank=True,
verbose_name='Account number'
)
portal_url = models.URLField(
blank=True,
verbose_name='Portal URL'
)
noc_contact = models.TextField(
blank=True,
verbose_name='NOC contact'
)
admin_contact = models.TextField(
blank=True,
verbose_name='Admin contact'
)
comments = models.TextField(
blank=True
)
tags = TaggableManager(through=TaggedItem)
objects = RestrictedQuerySet.as_manager()
csv_headers = [
'name', 'slug', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'comments',
]
clone_fields = [
'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact',
]
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('circuits:provider', args=[self.slug])
def to_csv(self):
return (
self.name,
self.slug,
self.asn,
self.account,
self.portal_url,
self.noc_contact,
self.admin_contact,
self.comments,
)
class CircuitType(ChangeLoggedModel):
"""
Circuits can be organized by their functional role. For example, a user might wish to define CircuitTypes named
"Long Haul," "Metro," or "Out-of-Band".
"""
name = models.CharField(
max_length=100,
unique=True
)
slug = models.SlugField(
max_length=100,
unique=True
)
description = models.CharField(
max_length=200,
blank=True,
)
objects = RestrictedQuerySet.as_manager()
csv_headers = ['name', 'slug', 'description']
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return "{}?type={}".format(reverse('circuits:circuit_list'), self.slug)
def to_csv(self):
return (
self.name,
self.slug,
self.description,
)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class Circuit(ChangeLoggedModel, CustomFieldModel):
"""
A communications circuit connects two points. Each Circuit belongs to a Provider; Providers may have multiple
circuits. Each circuit is also assigned a CircuitType and a Site. Circuit port speed and commit rate are measured
in Kbps.
"""
cid = models.CharField(
max_length=100,
verbose_name='Circuit ID'
)
provider = models.ForeignKey(
to='circuits.Provider',
on_delete=models.PROTECT,
related_name='circuits'
)
type = models.ForeignKey(
to='CircuitType',
on_delete=models.PROTECT,
related_name='circuits'
)
status = models.CharField(
max_length=50,
choices=CircuitStatusChoices,
default=CircuitStatusChoices.STATUS_ACTIVE
)
tenant = models.ForeignKey(
to='tenancy.Tenant',
on_delete=models.PROTECT,
related_name='circuits',
blank=True,
null=True
)
install_date = models.DateField(
blank=True,
null=True,
verbose_name='Date installed'
)
commit_rate = models.PositiveIntegerField(
blank=True,
null=True,
verbose_name='Commit rate (Kbps)')
description = models.CharField(
max_length=200,
blank=True
)
comments = models.TextField(
blank=True
)
objects = CircuitQuerySet.as_manager()
tags = TaggableManager(through=TaggedItem)
csv_headers = [
'cid', 'provider', 'type', 'status', 'tenant', 'install_date', 'commit_rate', 'description', 'comments',
]
clone_fields = [
'provider', 'type', 'status', 'tenant', 'install_date', 'commit_rate', 'description',
]
class Meta:
ordering = ['provider', 'cid']
unique_together = ['provider', 'cid']
def __str__(self):
return self.cid
def get_absolute_url(self):
return reverse('circuits:circuit', args=[self.pk])
def to_csv(self):
return (
self.cid,
self.provider.name,
self.type.name,
self.get_status_display(),
self.tenant.name if self.tenant else None,
self.install_date,
self.commit_rate,
self.description,
self.comments,
)
def get_status_class(self):
return CircuitStatusChoices.CSS_CLASSES.get(self.status)
def _get_termination(self, side):
for ct in self.terminations.all():
if ct.term_side == side:
return ct
return None
@property
def termination_a(self):
return self._get_termination('A')
@property
def termination_z(self):
return self._get_termination('Z')
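# A minimal usage sketch (hypothetical identifiers, not part of this module):
# the two properties above resolve the A and Z ends of a circuit, returning a
# CircuitTermination or None.
#
#   circuit = Circuit.objects.get(provider__slug='provider-a', cid='CKT-0001')
#   a_end = circuit.termination_a   # CircuitTermination with term_side == 'A', or None
#   z_end = circuit.termination_z   # CircuitTermination with term_side == 'Z', or None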
class CircuitTermination(PathEndpoint, CableTermination):
circuit = models.ForeignKey(
to='circuits.Circuit',
on_delete=models.CASCADE,
related_name='terminations'
)
term_side = models.CharField(
max_length=1,
choices=CircuitTerminationSideChoices,
verbose_name='Termination'
)
site = models.ForeignKey(
to='dcim.Site',
on_delete=models.PROTECT,
related_name='circuit_terminations'
)
port_speed = models.PositiveIntegerField(
verbose_name='Port speed (Kbps)',
blank=True,
null=True
)
upstream_speed = models.PositiveIntegerField(
blank=True,
null=True,
verbose_name='Upstream speed (Kbps)',
help_text='Upstream speed, if different from port speed'
)
xconnect_id = models.CharField(
max_length=50,
blank=True,
verbose_name='Cross-connect ID'
)
pp_info = models.CharField(
max_length=100,
blank=True,
verbose_name='Patch panel/port(s)'
)
description = models.CharField(
max_length=200,
blank=True
)
objects = RestrictedQuerySet.as_manager()
class Meta:
ordering = ['circuit', 'term_side']
unique_together = ['circuit', 'term_side']
def __str__(self):
return 'Side {}'.format(self.get_term_side_display())
def to_objectchange(self, action):
# Annotate the parent Circuit
try:
related_object = self.circuit
except Circuit.DoesNotExist:
# Parent circuit has been deleted
related_object = None
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
related_object=related_object,
object_data=serialize_object(self)
)
@property
def parent(self):
return self.circuit
def get_peer_termination(self):
peer_side = 'Z' if self.term_side == 'A' else 'A'
try:
return CircuitTermination.objects.prefetch_related('site').get(
circuit=self.circuit,
term_side=peer_side
)
except CircuitTermination.DoesNotExist:
return None
|
|
#!/usr/local/sci/bin/python
# PYTHON2.7
#
# Author: Kate Willett
# Created: 23 April 2016
# Last update: 23 April 2016
# Location: /data/local/hadkw/HADCRUH2/MARINE/EUSTACEMDS/EUSTACE_SST_MAT/
# GitHub: https://github.com/Kate-Willett/HadISDH_Marine_Build/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code reads in the ICOADS data output from QC using MDS_basic_KATE and
# pulls out the number of obs, and the number of obs passing QC with no problems,
# for each platform type.
#
# creates a list for later plotting
#
# makes up a plot for each year too - maybe
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# import datetime as dt
# import matplotlib.pyplot as plt
# import numpy as np
# from matplotlib.dates import date2num,num2date
# import sys, os
# import sys, getopt
# from scipy.optimize import curve_fit,fsolve,leastsq
# from scipy import pi,sqrt,exp
# from scipy.special import erf
# import scipy.stats
# from math import sqrt,pi
# import struct
# import pdb # pdb.set_trace() or c
#
# Kates:
# from LinearTrends import MedianPairwise - fits linear trend using Median Pairwise
# import MDS_basic_KATE as MDStool
#
# -----------------------
# DATA
# -----------------------
# /project/hadobs2/hadisdh/marine/ICOADS.2.5.1/ERAclimNBC/new_suite_197312_ERAclimNBC.txt
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# set up date cluster choices
# year1, year2, month1, month2
#
# python2.7 MakeObsCountList_APR2016.py --year1 '1973' --year2 '1973' --month1 '01' --month2 '01' --typee 'ERAclimNBC'
#
# This runs the code, outputs the plots and stops mid-process so you can then interact with the
# data.
#
# -----------------------
# OUTPUT
# -----------------------
# some plots:
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/PTTypeMetaDataDiags_all_ERAclimNBC_y1y2m1m2_APR2016.png
#
# a text file of stats
# /data/local/hadkw/HADCRUH2/MARINE/LISTS/PTTypeMetaDataStats_all_ERAclimNBC_APR2016.txt
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 1 (23 April 2016)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
#import datetime as dt
import matplotlib
# use the Agg environment to generate an image rather than outputting to screen
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
#from matplotlib.dates import date2num,num2date
import sys, os
import sys, getopt
#from scipy.optimize import curve_fit,fsolve,leastsq
#from scipy import pi,sqrt,exp
#from scipy.special import erf
#import scipy.stats
#from math import sqrt,pi
#import struct
import pdb # pdb.set_trace() or c
#from LinearTrends import MedianPairwise
import MDS_RWtools as mrw
#************************************************************************
# Main
#************************************************************************
def main(argv):
# INPUT PARAMETERS AS STRINGS!!!!
year1 = '2000'
year2 = '2000'
month1 = '01' # months must be 01, 02 etc
month2 = '12'
typee = 'ERAclimNBC'
try:
opts, args = getopt.getopt(argv, "hi:",
["year1=","year2=","month1=","month2=","typee="])
except getopt.GetoptError:
print 'Usage (as strings) MakeObsCountList_APR2016.py --year1 <1973> --year2 <1973> '+\
'--month1 <01> --month2 <12> --typee <ERAclimNBC>'
sys.exit(2)
for opt, arg in opts:
if opt == "--year1":
try:
year1 = arg
except:
sys.exit("Failed: year1 not an integer")
elif opt == "--year2":
try:
year2 = arg
except:
sys.exit("Failed: year2 not an integer")
elif opt == "--month1":
try:
month1 = arg
except:
sys.exit("Failed: month1 not an integer")
elif opt == "--month2":
try:
month2 = arg
except:
sys.exit("Failed: month2 not an integer")
elif opt == "--typee":
try:
typee = arg
except:
sys.exit("Failed: typee not a string")
assert year1 != -999 and year2 != -999, "Year not specified."
print(year1, year2, month1, month2, typee)
# pdb.set_trace()
#INDIR = '/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/ERAclimNBC/'
#INFIL = 'new_suite_'
#INEXT = '_'+typee+'.txt'
# OUTDIR = '/data/local/hadkw/HADCRUH2/MARINE/'
OUTDIR = ''
OutTypeFil = 'IMAGES/PTTypeMetaDataDiags_'+typee+'_'+year1+year2+month1+month2+'_APR2016'
OutTypeText = 'LISTS/PTTypeMetaDataStats_'+typee+'_APR2016.txt'
OutTypeTextD = 'LISTS/PTTypeMetaDataStatsDAY_'+typee+'_APR2016.txt'
OutTypeTextN = 'LISTS/PTTypeMetaDataStatsNIGHT_'+typee+'_APR2016.txt'
OutTypeGText = 'LISTS/PTTypeGOODMetaDataStats_'+typee+'_APR2016.txt'
OutTypeGTextD = 'LISTS/PTTypeGOODMetaDataStatsDAY_'+typee+'_APR2016.txt'
OutTypeGTextN = 'LISTS/PTTypeGOODMetaDataStatsNIGHT_'+typee+'_APR2016.txt'
OutQCFil = 'IMAGES/QCMetaDataDiags_'+typee+'_'+year1+year2+month1+month2+'_APR2016'
OutQCText = 'LISTS/QCMetaDataStats_'+typee+'_APR2016.txt'
OutQCTextD = 'LISTS/QCMetaDataStatsDAY_'+typee+'_APR2016.txt'
OutQCTextN = 'LISTS/QCMetaDataStatsNIGHT_'+typee+'_APR2016.txt'
# create empty arrays for data bundles
nobs=0 # we're looking at all obs, not just those with 'good' data
nobsQC=0 # we're looking at all obs, not just those with 'good' data
LATbun = []
PTbun = []
QCday = []
QCtrk = []
QCdate1= []
QCdate2= []
QCpos = []
QCblklst = []
QCdup = []
QCATbud = []
QCATclim = []
QCATround = []
QCATrep = []
QCDPTbud = []
QCDPTclim = []
QCDPTssat = []
QCDPTround = []
QCDPTrep = []
QCDPTrepsat = []
# loop through each month, read in data, keep metadata needed
for yy in range((int(year2)+1)-int(year1)):
for mm in range((int(month2)+1)-int(month1)):
print(str(yy+int(year1)),' ','{:02}'.format(mm+int(month1)))
MDSdict=mrw.ReadMDSstandard(str(yy+int(year1)),'{:02}'.format(mm+int(month1)), typee)
if (nobs == 0):
LATbun = MDSdict['LAT']
PTbun = MDSdict['PT']
QCday = MDSdict['day']
QCtrk = MDSdict['trk']
QCdate1= MDSdict['date1']
QCdate2= MDSdict['date2']
QCpos = MDSdict['pos']
QCblklst = MDSdict['blklst']
QCdup = MDSdict['dup']
QCATbud = MDSdict['ATbud']
QCATclim = MDSdict['ATclim']
QCATround = MDSdict['ATround']
QCATrep = MDSdict['ATrep']
QCDPTbud = MDSdict['DPTbud']
QCDPTclim = MDSdict['DPTclim']
QCDPTssat = MDSdict['DPTssat']
QCDPTround = MDSdict['DPTround']
QCDPTrep = MDSdict['DPTrep']
QCDPTrepsat = MDSdict['DPTrepsat']
else:
LATbun = np.append(LATbun,MDSdict['LAT'])
PTbun = np.append(PTbun,MDSdict['PT'])
QCday = np.append(QCday,MDSdict['day'])
QCtrk = np.append(QCtrk,MDSdict['trk'])
QCdate1= np.append(QCdate1,MDSdict['date1'])
QCdate2= np.append(QCdate2,MDSdict['date2'])
QCpos = np.append(QCpos,MDSdict['pos'])
QCblklst = np.append(QCblklst,MDSdict['blklst'])
QCdup = np.append(QCdup,MDSdict['dup'])
QCATbud = np.append(QCATbud,MDSdict['ATbud'])
QCATclim = np.append(QCATclim,MDSdict['ATclim'])
QCATround = np.append(QCATround,MDSdict['ATround'])
QCATrep = np.append(QCATrep,MDSdict['ATrep'])
QCDPTbud = np.append(QCDPTbud,MDSdict['DPTbud'])
QCDPTclim = np.append(QCDPTclim,MDSdict['DPTclim'])
QCDPTssat = np.append(QCDPTssat,MDSdict['DPTssat'])
QCDPTround = np.append(QCDPTround,MDSdict['DPTround'])
QCDPTrep = np.append(QCDPTrep,MDSdict['DPTrep'])
QCDPTrepsat = np.append(QCDPTrepsat,MDSdict['DPTrepsat'])
nobs = nobs + len(MDSdict['LAT'])
MDSdict = 0 # clear out
# Set up day and night pointers
DayPts = np.where(QCday == 1)[0]
Dnobs = len(DayPts)
NightPts = np.where(QCday == 0)[0]
Nnobs = len(NightPts)
# Get good pointers
gotGOODs = np.where((QCtrk == 0) & (QCATbud == 0) & (QCATclim == 0) & (QCATrep == 0) & (QCDPTbud == 0) & (QCDPTclim == 0) & (QCDPTrep == 0) & (QCDPTssat == 0) & (QCDPTrepsat == 0))[0]
ngoods=len(gotGOODs)
DgotGOODs = np.where((QCtrk[DayPts] == 0) & (QCATbud[DayPts] == 0) & (QCATclim[DayPts] == 0) & (QCATrep[DayPts] == 0) & (QCDPTbud[DayPts] == 0) & (QCDPTclim[DayPts] == 0) & (QCDPTrep[DayPts] == 0) & (QCDPTssat[DayPts] == 0) & (QCDPTrepsat[DayPts] == 0))[0]
Dngoods = len(DgotGOODs)
NgotGOODs = np.where((QCtrk[NightPts] == 0) & (QCATbud[NightPts] == 0) & (QCATclim[NightPts] == 0) & (QCATrep[NightPts] == 0) & (QCDPTbud[NightPts] == 0) & (QCDPTclim[NightPts] == 0) & (QCDPTrep[NightPts] == 0) & (QCDPTssat[NightPts] == 0) & (QCDPTrepsat[NightPts] == 0))[0]
Nngoods=len(NgotGOODs)
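# A minimal helper sketch (hypothetical; it is not called anywhere in this
# script): the per-category counting and percentage logic below is repeated
# for the all/day/night cases and again for the plots, and could be computed
# in one place along these lines. The category values and the <= 15
# "PT present" test mirror the code below.
def _category_stats(pt_values, categories=(0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 15)):
    present = np.where(pt_values <= 15)[0]
    counts = dict([(cat, len(np.where(pt_values == cat)[0])) for cat in categories])
    pcts = dict([(cat, (counts[cat] / float(len(present))) * 100. if len(present) > 0 else 0.)
                 for cat in categories])
    return len(present), counts, pcts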
# Just goods
gotPTs = np.where(PTbun[gotGOODs] <= 15)[0]
got0s = np.where(PTbun[gotGOODs] == 0)[0]
got1s = np.where(PTbun[gotGOODs] == 1)[0]
got2s = np.where(PTbun[gotGOODs] == 2)[0]
got3s = np.where(PTbun[gotGOODs] == 3)[0]
got4s = np.where(PTbun[gotGOODs] == 4)[0]
got5s = np.where(PTbun[gotGOODs] == 5)[0]
got6s = np.where(PTbun[gotGOODs] == 6)[0]
got8s = np.where(PTbun[gotGOODs] == 8)[0]
got9s = np.where(PTbun[gotGOODs] == 9)[0]
got10s = np.where(PTbun[gotGOODs] == 10)[0]
got15s = np.where(PTbun[gotGOODs] == 15)[0]
got0spct = 0.
got1spct = 0.
got2spct = 0.
got3spct = 0.
got4spct = 0.
got5spct = 0.
got6spct = 0.
got8spct = 0.
got9spct = 0.
got10spct = 0.
got15spct = 0.
gotspct = 0.
if (nobs > 0):
gotspct = (len(gotPTs)/float(ngoods))*100
if (len(got0s) > 0):
got0spct = (len(got0s)/float(len(gotPTs)))*100
if (len(got1s) > 0):
got1spct = (len(got1s)/float(len(gotPTs)))*100
if (len(got2s) > 0):
got2spct = (len(got2s)/float(len(gotPTs)))*100
if (len(got3s) > 0):
got3spct = (len(got3s)/float(len(gotPTs)))*100
if (len(got4s) > 0):
got4spct = (len(got4s)/float(len(gotPTs)))*100
if (len(got5s) > 0):
got5spct = (len(got5s)/float(len(gotPTs)))*100
print(len(got5s))
if (len(got6s) > 0):
got6spct = (len(got6s)/float(len(gotPTs)))*100
if (len(got8s) > 0):
got8spct = (len(got8s)/float(len(gotPTs)))*100
if (len(got9s) > 0):
got9spct = (len(got9s)/float(len(gotPTs)))*100
if (len(got10s) > 0):
got10spct = (len(got10s)/float(len(gotPTs)))*100
if (len(got15s) > 0):
got15spct = (len(got15s)/float(len(gotPTs)))*100
# DAY
DgotPTs = np.where(PTbun[DgotGOODs] <= 15)[0]
Dgot0s = np.where(PTbun[DgotGOODs] == 0)[0]
Dgot1s = np.where(PTbun[DgotGOODs] == 1)[0]
Dgot2s = np.where(PTbun[DgotGOODs] == 2)[0]
Dgot3s = np.where(PTbun[DgotGOODs] == 3)[0]
Dgot4s = np.where(PTbun[DgotGOODs] == 4)[0]
Dgot5s = np.where(PTbun[DgotGOODs] == 5)[0]
Dgot6s = np.where(PTbun[DgotGOODs] == 6)[0]
Dgot8s = np.where(PTbun[DgotGOODs] == 8)[0]
Dgot9s = np.where(PTbun[DgotGOODs] == 9)[0]
Dgot10s = np.where(PTbun[DgotGOODs] == 10)[0]
Dgot15s = np.where(PTbun[DgotGOODs] == 15)[0]
Dgot0spct = 0.
Dgot1spct = 0.
Dgot2spct = 0.
Dgot3spct = 0.
Dgot4spct = 0.
Dgot5spct = 0.
Dgot6spct = 0.
Dgot8spct = 0.
Dgot9spct = 0.
Dgot10spct = 0.
Dgot15spct = 0.
Dgotspct = 0.
if (nobs > 0):
Dgotspct = (len(DgotPTs)/float(Dngoods))*100
if (len(Dgot0s) > 0):
Dgot0spct = (len(Dgot0s)/float(len(DgotPTs)))*100
if (len(Dgot1s) > 0):
Dgot1spct = (len(Dgot1s)/float(len(DgotPTs)))*100
if (len(Dgot2s) > 0):
Dgot2spct = (len(Dgot2s)/float(len(DgotPTs)))*100
if (len(Dgot3s) > 0):
Dgot3spct = (len(Dgot3s)/float(len(DgotPTs)))*100
if (len(Dgot4s) > 0):
Dgot4spct = (len(Dgot4s)/float(len(DgotPTs)))*100
if (len(Dgot5s) > 0):
Dgot5spct = (len(Dgot5s)/float(len(DgotPTs)))*100
if (len(Dgot6s) > 0):
Dgot6spct = (len(Dgot6s)/float(len(DgotPTs)))*100
if (len(Dgot8s) > 0):
Dgot8spct = (len(Dgot8s)/float(len(DgotPTs)))*100
if (len(Dgot9s) > 0):
Dgot9spct = (len(Dgot9s)/float(len(DgotPTs)))*100
if (len(Dgot10s) > 0):
Dgot10spct = (len(Dgot10s)/float(len(DgotPTs)))*100
if (len(Dgot15s) > 0):
Dgot15spct = (len(Dgot15s)/float(len(DgotPTs)))*100
NgotPTs = np.where(PTbun[NgotGOODs] <= 15)[0]
Ngot0s = np.where(PTbun[NgotGOODs] == 0)[0]
Ngot1s = np.where(PTbun[NgotGOODs] == 1)[0]
Ngot2s = np.where(PTbun[NgotGOODs] == 2)[0]
Ngot3s = np.where(PTbun[NgotGOODs] == 3)[0]
Ngot4s = np.where(PTbun[NgotGOODs] == 4)[0]
Ngot5s = np.where(PTbun[NgotGOODs] == 5)[0]
Ngot6s = np.where(PTbun[NgotGOODs] == 6)[0]
Ngot8s = np.where(PTbun[NgotGOODs] == 8)[0]
Ngot9s = np.where(PTbun[NgotGOODs] == 9)[0]
Ngot10s = np.where(PTbun[NgotGOODs] == 10)[0]
Ngot15s = np.where(PTbun[NgotGOODs] == 15)[0]
Ngot0spct = 0.
Ngot1spct = 0.
Ngot2spct = 0.
Ngot3spct = 0.
Ngot4spct = 0.
Ngot5spct = 0.
Ngot6spct = 0.
Ngot8spct = 0.
Ngot9spct = 0.
Ngot10spct = 0.
Ngot15spct = 0.
Ngotspct = 0.
if (nobs > 0):
Ngotspct = (len(NgotPTs)/float(Nngoods))*100
if (len(Ngot0s) > 0):
Ngot0spct = (len(Ngot0s)/float(len(NgotPTs)))*100
if (len(Ngot1s) > 0):
Ngot1spct = (len(Ngot1s)/float(len(NgotPTs)))*100
if (len(Ngot2s) > 0):
Ngot2spct = (len(Ngot2s)/float(len(NgotPTs)))*100
if (len(Ngot3s) > 0):
Ngot3spct = (len(Ngot3s)/float(len(NgotPTs)))*100
if (len(Ngot4s) > 0):
Ngot4spct = (len(Ngot4s)/float(len(NgotPTs)))*100
if (len(Ngot5s) > 0):
Ngot5spct = (len(Ngot5s)/float(len(NgotPTs)))*100
if (len(Ngot6s) > 0):
Ngot6spct = (len(Ngot6s)/float(len(NgotPTs)))*100
if (len(Ngot8s) > 0):
Ngot8spct = (len(Ngot8s)/float(len(NgotPTs)))*100
if (len(Ngot9s) > 0):
Ngot9spct = (len(Ngot9s)/float(len(NgotPTs)))*100
if (len(Ngot10s) > 0):
Ngot10spct = (len(Ngot10s)/float(len(NgotPTs)))*100
if (len(Ngot15s) > 0):
Ngot15spct = (len(Ngot15s)/float(len(NgotPTs)))*100
# Write out stats to file (append!)
filee=open(OUTDIR+OutTypeGText,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(ngoods)+\
' PT: '+'{:8d}'.format(len(gotPTs))+' ('+"{:5.2f}".format(gotspct)+\
'%) 0: '+'{:8d}'.format(len(got0s))+' ('+"{:5.2f}".format(got0spct)+\
'%) 1: '+'{:8d}'.format(len(got1s))+' ('+"{:5.2f}".format(got1spct)+\
'%) 2: '+'{:8d}'.format(len(got2s))+' ('+"{:5.2f}".format(got2spct)+\
'%) 3: '+'{:8d}'.format(len(got3s))+' ('+"{:5.2f}".format(got3spct)+\
'%) 4: '+'{:8d}'.format(len(got4s))+' ('+"{:5.2f}".format(got4spct)+\
'%) 5: '+'{:8d}'.format(len(got5s))+' ('+"{:5.2f}".format(got5spct)+\
'%) 6: '+'{:8d}'.format(len(got6s))+' ('+"{:5.2f}".format(got6spct)+\
'%) 8: '+'{:8d}'.format(len(got8s))+' ('+"{:5.2f}".format(got8spct)+\
'%) 9: '+'{:8d}'.format(len(got9s))+' ('+"{:5.2f}".format(got9spct)+\
'%) 10: '+'{:8d}'.format(len(got10s))+' ('+"{:5.2f}".format(got10spct)+\
'%) 15: '+'{:8d}'.format(len(got15s))+' ('+"{:5.2f}".format(got15spct)+\
'%)\n'))
filee.close()
filee=open(OUTDIR+OutTypeGTextD,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(Dngoods)+\
' PT: '+'{:8d}'.format(len(DgotPTs))+' ('+"{:5.2f}".format(Dgotspct)+\
'%) 0: '+'{:8d}'.format(len(Dgot0s))+' ('+"{:5.2f}".format(Dgot0spct)+\
'%) 1: '+'{:8d}'.format(len(Dgot1s))+' ('+"{:5.2f}".format(Dgot1spct)+\
'%) 2: '+'{:8d}'.format(len(Dgot2s))+' ('+"{:5.2f}".format(Dgot2spct)+\
'%) 3: '+'{:8d}'.format(len(Dgot3s))+' ('+"{:5.2f}".format(Dgot3spct)+\
'%) 4: '+'{:8d}'.format(len(Dgot4s))+' ('+"{:5.2f}".format(Dgot4spct)+\
'%) 5: '+'{:8d}'.format(len(Dgot5s))+' ('+"{:5.2f}".format(Dgot5spct)+\
'%) 6: '+'{:8d}'.format(len(Dgot6s))+' ('+"{:5.2f}".format(Dgot6spct)+\
'%) 8: '+'{:8d}'.format(len(Dgot8s))+' ('+"{:5.2f}".format(Dgot8spct)+\
'%) 9: '+'{:8d}'.format(len(Dgot9s))+' ('+"{:5.2f}".format(Dgot9spct)+\
'%) 10: '+'{:8d}'.format(len(Dgot10s))+' ('+"{:5.2f}".format(Dgot10spct)+\
'%) 15: '+'{:8d}'.format(len(Dgot15s))+' ('+"{:5.2f}".format(Dgot15spct)+\
'%)\n'))
filee.close()
filee=open(OUTDIR+OutTypeGTextN,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(Nngoods)+\
' PT: '+'{:8d}'.format(len(NgotPTs))+' ('+"{:5.2f}".format(Ngotspct)+\
'%) 0: '+'{:8d}'.format(len(Ngot0s))+' ('+"{:5.2f}".format(Ngot0spct)+\
'%) 1: '+'{:8d}'.format(len(Ngot1s))+' ('+"{:5.2f}".format(Ngot1spct)+\
'%) 2: '+'{:8d}'.format(len(Ngot2s))+' ('+"{:5.2f}".format(Ngot2spct)+\
'%) 3: '+'{:8d}'.format(len(Ngot3s))+' ('+"{:5.2f}".format(Ngot3spct)+\
'%) 4: '+'{:8d}'.format(len(Ngot4s))+' ('+"{:5.2f}".format(Ngot4spct)+\
'%) 5: '+'{:8d}'.format(len(Ngot5s))+' ('+"{:5.2f}".format(Ngot5spct)+\
'%) 6: '+'{:8d}'.format(len(Ngot6s))+' ('+"{:5.2f}".format(Ngot6spct)+\
'%) 8: '+'{:8d}'.format(len(Ngot8s))+' ('+"{:5.2f}".format(Ngot8spct)+\
'%) 9: '+'{:8d}'.format(len(Ngot9s))+' ('+"{:5.2f}".format(Ngot9spct)+\
'%) 10: '+'{:8d}'.format(len(Ngot10s))+' ('+"{:5.2f}".format(Ngot10spct)+\
'%) 15: '+'{:8d}'.format(len(Ngot15s))+' ('+"{:5.2f}".format(Ngot15spct)+\
'%)\n'))
filee.close()
# set up general plotting stuff
# set up dimensions and plot - a single panel of platform type (PT) category
# against latitude: all obs are plotted at the category position, with day and
# night obs offset slightly to the left and right respectively
# - prints number and % of obs with PT present, and in each PT category
plt.clf()
fig=plt.figure(figsize=(6,8))
ax=plt.axes([0.1,0.1,0.85,0.7])
plt.xlim(-1,11)
plt.ylim(-91,91)
plt.xlabel('Platform Type Category')
plt.ylabel('Latitude')
locs = ax.get_xticks().tolist()
locs = np.arange(-1,12,1.)
ax.set_xticks(locs)
labels=[x.get_text() for x in ax.get_xticklabels()]
labels[1] = '0'
labels[2] = '1'
labels[3] = '2'
labels[4] = '3'
labels[5] = '4'
labels[6] = '5'
labels[7] = '6'
labels[8] = '8'
labels[9] = '9'
labels[10] = '10'
labels[11] = '15'
ax.set_xticklabels(labels)
gotPTs = np.where(PTbun <= 15)[0]
got0s = np.where(PTbun == 0)[0]
got1s = np.where(PTbun == 1)[0]
got2s = np.where(PTbun == 2)[0]
got3s = np.where(PTbun == 3)[0]
got4s = np.where(PTbun == 4)[0]
got5s = np.where(PTbun == 5)[0]
got6s = np.where(PTbun == 6)[0]
got8s = np.where(PTbun == 8)[0]
got9s = np.where(PTbun == 9)[0]
got10s = np.where(PTbun == 10)[0]
got15s = np.where(PTbun == 15)[0]
got0spct = 0.
got1spct = 0.
got2spct = 0.
got3spct = 0.
got4spct = 0.
got5spct = 0.
got6spct = 0.
got8spct = 0.
got9spct = 0.
got10spct = 0.
got15spct = 0.
gotspct = 0.
if (nobs > 0):
gotspct = (len(gotPTs)/float(nobs))*100
if (len(got0s) > 0):
got0spct = (len(got0s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(0,len(got0s)),LATbun[got0s],c='hotpink',marker='o',linewidth=0.,s=12)
if (len(got1s) > 0):
got1spct = (len(got1s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(1,len(got1s)),LATbun[got1s],c='deeppink',marker='o',linewidth=0.,s=12)
if (len(got2s) > 0):
got2spct = (len(got2s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(2,len(got2s)),LATbun[got2s],c='red',marker='o',linewidth=0.,s=12)
if (len(got3s) > 0):
got3spct = (len(got3s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(3,len(got3s)),LATbun[got3s],c='darkorange',marker='o',linewidth=0.,s=12)
if (len(got4s) > 0):
got4spct = (len(got4s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(4,len(got4s)),LATbun[got4s],c='gold',marker='o',linewidth=0.,s=12)
if (len(got5s) > 0):
got5spct = (len(got5s)/float(len(gotPTs)))*100
print(len(got5s))
plt.scatter(np.repeat(5,len(got5s)),LATbun[got5s],c='grey',marker='o',linewidth=0.,s=12)
if (len(got6s) > 0):
got6spct = (len(got6s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(6,len(got6s)),LATbun[got6s],c='limegreen',marker='o',linewidth=0.,s=12)
if (len(got8s) > 0):
got8spct = (len(got8s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(7,len(got8s)),LATbun[got8s],c='olivedrab',marker='o',linewidth=0.,s=12)
if (len(got9s) > 0):
got9spct = (len(got9s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(8,len(got9s)),LATbun[got9s],c='blue',marker='o',linewidth=0.,s=12)
if (len(got10s) > 0):
got10spct = (len(got10s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(9,len(got10s)),LATbun[got10s],c='indigo',marker='o',linewidth=0.,s=12)
if (len(got15s) > 0):
got15spct = (len(got15s)/float(len(gotPTs)))*100
plt.scatter(np.repeat(10,len(got15s)),LATbun[got15s],c='violet',marker='o',linewidth=0.,s=12)
# DAY
DgotPTs = np.where(PTbun[DayPts] <= 15)[0]
Dgot0s = np.where(PTbun[DayPts] == 0)[0]
Dgot1s = np.where(PTbun[DayPts] == 1)[0]
Dgot2s = np.where(PTbun[DayPts] == 2)[0]
Dgot3s = np.where(PTbun[DayPts] == 3)[0]
Dgot4s = np.where(PTbun[DayPts] == 4)[0]
Dgot5s = np.where(PTbun[DayPts] == 5)[0]
Dgot6s = np.where(PTbun[DayPts] == 6)[0]
Dgot8s = np.where(PTbun[DayPts] == 8)[0]
Dgot9s = np.where(PTbun[DayPts] == 9)[0]
Dgot10s = np.where(PTbun[DayPts] == 10)[0]
Dgot15s = np.where(PTbun[DayPts] == 15)[0]
Dgot0spct = 0.
Dgot1spct = 0.
Dgot2spct = 0.
Dgot3spct = 0.
Dgot4spct = 0.
Dgot5spct = 0.
Dgot6spct = 0.
Dgot8spct = 0.
Dgot9spct = 0.
Dgot10spct = 0.
Dgot15spct = 0.
Dgotspct = 0.
if (nobs > 0):
Dgotspct = (len(DgotPTs)/float(Dnobs))*100
if (len(Dgot0s) > 0):
Dgot0spct = (len(Dgot0s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(-0.2,len(Dgot0s)),LATbun[DayPts[Dgot0s]],c='hotpink',marker='o',linewidth=0.,s=12)
if (len(Dgot1s) > 0):
Dgot1spct = (len(Dgot1s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(0.8,len(Dgot1s)),LATbun[DayPts[Dgot1s]],c='deeppink',marker='o',linewidth=0.,s=12)
if (len(Dgot2s) > 0):
Dgot2spct = (len(Dgot2s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(1.8,len(Dgot2s)),LATbun[DayPts[Dgot2s]],c='red',marker='o',linewidth=0.,s=12)
if (len(Dgot3s) > 0):
Dgot3spct = (len(Dgot3s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(2.8,len(Dgot3s)),LATbun[DayPts[Dgot3s]],c='darkorange',marker='o',linewidth=0.,s=12)
if (len(Dgot4s) > 0):
Dgot4spct = (len(Dgot4s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(3.8,len(Dgot4s)),LATbun[DayPts[Dgot4s]],c='gold',marker='o',linewidth=0.,s=12)
if (len(Dgot5s) > 0):
Dgot5spct = (len(Dgot5s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(4.8,len(Dgot5s)),LATbun[DayPts[Dgot5s]],c='grey',marker='o',linewidth=0.,s=12)
if (len(Dgot6s) > 0):
Dgot6spct = (len(Dgot6s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(5.8,len(Dgot6s)),LATbun[DayPts[Dgot6s]],c='limegreen',marker='o',linewidth=0.,s=12)
if (len(Dgot8s) > 0):
Dgot8spct = (len(Dgot8s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(6.8,len(Dgot8s)),LATbun[DayPts[Dgot8s]],c='olivedrab',marker='o',linewidth=0.,s=12)
if (len(Dgot9s) > 0):
Dgot9spct = (len(Dgot9s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(7.8,len(Dgot9s)),LATbun[DayPts[Dgot9s]],c='blue',marker='o',linewidth=0.,s=12)
if (len(Dgot10s) > 0):
Dgot10spct = (len(Dgot10s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(8.8,len(Dgot10s)),LATbun[DayPts[Dgot10s]],c='indigo',marker='o',linewidth=0.,s=12)
if (len(Dgot15s) > 0):
Dgot15spct = (len(Dgot15s)/float(len(DgotPTs)))*100
plt.scatter(np.repeat(9.8,len(Dgot15s)),LATbun[DayPts[Dgot15s]],c='violet',marker='o',linewidth=0.,s=12)
NgotPTs = np.where(PTbun[NightPts] <= 15)[0]
Ngot0s = np.where(PTbun[NightPts] == 0)[0]
Ngot1s = np.where(PTbun[NightPts] == 1)[0]
Ngot2s = np.where(PTbun[NightPts] == 2)[0]
Ngot3s = np.where(PTbun[NightPts] == 3)[0]
Ngot4s = np.where(PTbun[NightPts] == 4)[0]
Ngot5s = np.where(PTbun[NightPts] == 5)[0]
Ngot6s = np.where(PTbun[NightPts] == 6)[0]
Ngot8s = np.where(PTbun[NightPts] == 8)[0]
Ngot9s = np.where(PTbun[NightPts] == 9)[0]
Ngot10s = np.where(PTbun[NightPts] == 10)[0]
Ngot15s = np.where(PTbun[NightPts] == 15)[0]
Ngot0spct = 0.
Ngot1spct = 0.
Ngot2spct = 0.
Ngot3spct = 0.
Ngot4spct = 0.
Ngot5spct = 0.
Ngot6spct = 0.
Ngot8spct = 0.
Ngot9spct = 0.
Ngot10spct = 0.
Ngot15spct = 0.
Ngotspct = 0.
if (nobs > 0):
Ngotspct = (len(NgotPTs)/float(Nnobs))*100
if (len(Ngot0s) > 0):
Ngot0spct = (len(Ngot0s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(0.2,len(Ngot0s)),LATbun[NightPts[Ngot0s]],c='hotpink',marker='o',linewidth=0.,s=12)
if (len(Ngot1s) > 0):
Ngot1spct = (len(Ngot1s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(1.2,len(Ngot1s)),LATbun[NightPts[Ngot1s]],c='deeppink',marker='o',linewidth=0.,s=12)
if (len(Ngot2s) > 0):
Ngot2spct = (len(Ngot2s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(2.2,len(Ngot2s)),LATbun[NightPts[Ngot2s]],c='red',marker='o',linewidth=0.,s=12)
if (len(Ngot3s) > 0):
Ngot3spct = (len(Ngot3s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(3.2,len(Ngot3s)),LATbun[NightPts[Ngot3s]],c='darkorange',marker='o',linewidth=0.,s=12)
if (len(Ngot4s) > 0):
Ngot4spct = (len(Ngot4s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(4.2,len(Ngot4s)),LATbun[NightPts[Ngot4s]],c='gold',marker='o',linewidth=0.,s=12)
if (len(Ngot5s) > 0):
Ngot5spct = (len(Ngot5s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(5.2,len(Ngot5s)),LATbun[NightPts[Ngot5s]],c='grey',marker='o',linewidth=0.,s=12)
if (len(Ngot6s) > 0):
Ngot6spct = (len(Ngot6s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(6.2,len(Ngot6s)),LATbun[NightPts[Ngot6s]],c='limegreen',marker='o',linewidth=0.,s=12)
if (len(Ngot8s) > 0):
Ngot8spct = (len(Ngot8s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(7.2,len(Ngot8s)),LATbun[NightPts[Ngot8s]],c='olivedrab',marker='o',linewidth=0.,s=12)
if (len(Ngot9s) > 0):
Ngot9spct = (len(Ngot9s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(8.2,len(Ngot9s)),LATbun[NightPts[Ngot9s]],c='blue',marker='o',linewidth=0.,s=12)
if (len(Ngot10s) > 0):
Ngot10spct = (len(Ngot10s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(9.2,len(Ngot10s)),LATbun[NightPts[Ngot10s]],c='indigo',marker='o',linewidth=0.,s=12)
if (len(Ngot15s) > 0):
Ngot15spct = (len(Ngot15s)/float(len(NgotPTs)))*100
plt.scatter(np.repeat(10.2,len(Ngot15s)),LATbun[NightPts[Ngot15s]],c='violet',marker='o',linewidth=0.,s=12)
plt.annotate('PT: '+str(len(gotPTs))+' ('+"{:6.2f}".format(gotspct)+'%), '+str(len(DgotPTs))+' ('+"{:6.2f}".format(Dgotspct)+'%), '+str(len(NgotPTs))+' ('+"{:6.2f}".format(Ngotspct)+'%)',xy=(0.01,1.26),xycoords='axes fraction',size=7,color='black')
plt.annotate('0: '+str(len(got0s))+' ('+"{:6.2f}".format(got0spct)+'%), '+str(len(Dgot0s))+' ('+"{:6.2f}".format(Dgot0spct)+'%), '+str(len(Ngot0s))+' ('+"{:6.2f}".format(Ngot0spct)+'%)',xy=(0.01,1.22),xycoords='axes fraction',size=7,color='hotpink')
plt.annotate('1: '+str(len(got1s))+' ('+"{:6.2f}".format(got1spct)+'%), '+str(len(Dgot1s))+' ('+"{:6.2f}".format(Dgot1spct)+'%), '+str(len(Ngot1s))+' ('+"{:6.2f}".format(Ngot1spct)+'%)',xy=(0.01,1.18),xycoords='axes fraction',size=7,color='deeppink')
plt.annotate('2: '+str(len(got2s))+' ('+"{:6.2f}".format(got2spct)+'%), '+str(len(Dgot2s))+' ('+"{:6.2f}".format(Dgot2spct)+'%), '+str(len(Ngot2s))+' ('+"{:6.2f}".format(Ngot2spct)+'%)',xy=(0.01,1.14),xycoords='axes fraction',size=7,color='red')
plt.annotate('3: '+str(len(got3s))+' ('+"{:6.2f}".format(got3spct)+'%), '+str(len(Dgot3s))+' ('+"{:6.2f}".format(Dgot3spct)+'%), '+str(len(Ngot3s))+' ('+"{:6.2f}".format(Ngot3spct)+'%)',xy=(0.01,1.10),xycoords='axes fraction',size=7,color='darkorange')
plt.annotate('4: '+str(len(got4s))+' ('+"{:6.2f}".format(got4spct)+'%), '+str(len(Dgot4s))+' ('+"{:6.2f}".format(Dgot4spct)+'%), '+str(len(Ngot4s))+' ('+"{:6.2f}".format(Ngot4spct)+'%)',xy=(0.01,1.06),xycoords='axes fraction',size=7,color='gold')
plt.annotate('5: '+str(len(got5s))+' ('+"{:6.2f}".format(got5spct)+'%), '+str(len(Dgot5s))+' ('+"{:6.2f}".format(Dgot5spct)+'%), '+str(len(Ngot5s))+' ('+"{:6.2f}".format(Ngot5spct)+'%)',xy=(0.01,1.02),xycoords='axes fraction',size=7,color='grey')
plt.annotate('6: '+str(len(got6s))+' ('+"{:6.2f}".format(got6spct)+'%), '+str(len(Dgot6s))+' ('+"{:6.2f}".format(Dgot6spct)+'%), '+str(len(Ngot6s))+' ('+"{:6.2f}".format(Ngot6spct)+'%)',xy=(0.51,1.22),xycoords='axes fraction',size=7,color='limegreen')
plt.annotate('8: '+str(len(got8s))+' ('+"{:6.2f}".format(got8spct)+'%), '+str(len(Dgot8s))+' ('+"{:6.2f}".format(Dgot8spct)+'%), '+str(len(Ngot8s))+' ('+"{:6.2f}".format(Ngot8spct)+'%)',xy=(0.51,1.18),xycoords='axes fraction',size=7,color='olivedrab')
plt.annotate('9: '+str(len(got9s))+' ('+"{:6.2f}".format(got9spct)+'%), '+str(len(Dgot9s))+' ('+"{:6.2f}".format(Dgot9spct)+'%), '+str(len(Ngot9s))+' ('+"{:6.2f}".format(Ngot9spct)+'%)',xy=(0.51,1.14),xycoords='axes fraction',size=7,color='blue')
plt.annotate('10: '+str(len(got10s))+' ('+"{:6.2f}".format(got10spct)+'%), '+str(len(Dgot10s))+' ('+"{:6.2f}".format(Dgot10spct)+'%), '+str(len(Ngot10s))+' ('+"{:6.2f}".format(Ngot10spct)+'%)',xy=(0.51,1.10),xycoords='axes fraction',size=7,color='indigo')
plt.annotate('15: '+str(len(got15s))+' ('+"{:6.2f}".format(got15spct)+'%), '+str(len(Dgot15s))+' ('+"{:6.2f}".format(Dgot15spct)+'%), '+str(len(Ngot15s))+' ('+"{:6.2f}".format(Ngot15spct)+'%)',xy=(0.51,1.06),xycoords='axes fraction',size=7,color='violet')
#plt.tight_layout()
# plt.savefig(OUTDIR+OutTypeFil+".eps")
plt.savefig(OUTDIR+OutTypeFil+".png")
# Write out stats to file (append!)
filee=open(OUTDIR+OutTypeText,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(nobs)+\
' PT: '+'{:8d}'.format(len(gotPTs))+' ('+"{:5.2f}".format(gotspct)+\
'%) 0: '+'{:8d}'.format(len(got0s))+' ('+"{:5.2f}".format(got0spct)+\
'%) 1: '+'{:8d}'.format(len(got1s))+' ('+"{:5.2f}".format(got1spct)+\
'%) 2: '+'{:8d}'.format(len(got2s))+' ('+"{:5.2f}".format(got2spct)+\
'%) 3: '+'{:8d}'.format(len(got3s))+' ('+"{:5.2f}".format(got3spct)+\
'%) 4: '+'{:8d}'.format(len(got4s))+' ('+"{:5.2f}".format(got4spct)+\
'%) 5: '+'{:8d}'.format(len(got5s))+' ('+"{:5.2f}".format(got5spct)+\
'%) 6: '+'{:8d}'.format(len(got6s))+' ('+"{:5.2f}".format(got6spct)+\
'%) 8: '+'{:8d}'.format(len(got8s))+' ('+"{:5.2f}".format(got8spct)+\
'%) 9: '+'{:8d}'.format(len(got9s))+' ('+"{:5.2f}".format(got9spct)+\
'%) 10: '+'{:8d}'.format(len(got10s))+' ('+"{:5.2f}".format(got10spct)+\
'%) 15: '+'{:8d}'.format(len(got15s))+' ('+"{:5.2f}".format(got15spct)+\
'%)\n'))
filee.close()
filee=open(OUTDIR+OutTypeTextD,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(Dnobs)+\
' PT: '+'{:8d}'.format(len(DgotPTs))+' ('+"{:5.2f}".format(Dgotspct)+\
'%) 0: '+'{:8d}'.format(len(Dgot0s))+' ('+"{:5.2f}".format(Dgot0spct)+\
'%) 1: '+'{:8d}'.format(len(Dgot1s))+' ('+"{:5.2f}".format(Dgot1spct)+\
'%) 2: '+'{:8d}'.format(len(Dgot2s))+' ('+"{:5.2f}".format(Dgot2spct)+\
'%) 3: '+'{:8d}'.format(len(Dgot3s))+' ('+"{:5.2f}".format(Dgot3spct)+\
'%) 4: '+'{:8d}'.format(len(Dgot4s))+' ('+"{:5.2f}".format(Dgot4spct)+\
'%) 5: '+'{:8d}'.format(len(Dgot5s))+' ('+"{:5.2f}".format(Dgot5spct)+\
'%) 6: '+'{:8d}'.format(len(Dgot6s))+' ('+"{:5.2f}".format(Dgot6spct)+\
'%) 8: '+'{:8d}'.format(len(Dgot8s))+' ('+"{:5.2f}".format(Dgot8spct)+\
'%) 9: '+'{:8d}'.format(len(Dgot9s))+' ('+"{:5.2f}".format(Dgot9spct)+\
'%) 10: '+'{:8d}'.format(len(Dgot10s))+' ('+"{:5.2f}".format(Dgot10spct)+\
'%) 15: '+'{:8d}'.format(len(Dgot15s))+' ('+"{:5.2f}".format(Dgot15spct)+\
'%)\n'))
filee.close()
filee=open(OUTDIR+OutTypeTextN,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(Nnobs)+\
' PT: '+'{:8d}'.format(len(NgotPTs))+' ('+"{:5.2f}".format(Ngotspct)+\
'%) 0: '+'{:8d}'.format(len(Ngot0s))+' ('+"{:5.2f}".format(Ngot0spct)+\
'%) 1: '+'{:8d}'.format(len(Ngot1s))+' ('+"{:5.2f}".format(Ngot1spct)+\
'%) 2: '+'{:8d}'.format(len(Ngot2s))+' ('+"{:5.2f}".format(Ngot2spct)+\
'%) 3: '+'{:8d}'.format(len(Ngot3s))+' ('+"{:5.2f}".format(Ngot3spct)+\
'%) 4: '+'{:8d}'.format(len(Ngot4s))+' ('+"{:5.2f}".format(Ngot4spct)+\
'%) 5: '+'{:8d}'.format(len(Ngot5s))+' ('+"{:5.2f}".format(Ngot5spct)+\
'%) 6: '+'{:8d}'.format(len(Ngot6s))+' ('+"{:5.2f}".format(Ngot6spct)+\
'%) 8: '+'{:8d}'.format(len(Ngot8s))+' ('+"{:5.2f}".format(Ngot8spct)+\
'%) 9: '+'{:8d}'.format(len(Ngot9s))+' ('+"{:5.2f}".format(Ngot9spct)+\
'%) 10: '+'{:8d}'.format(len(Ngot10s))+' ('+"{:5.2f}".format(Ngot10spct)+\
'%) 15: '+'{:8d}'.format(len(Ngot15s))+' ('+"{:5.2f}".format(Ngot15spct)+\
'%)\n'))
filee.close()
plt.clf()
fig=plt.figure(figsize=(6,8))
ax=plt.axes([0.1,0.1,0.85,0.7])
plt.xlim(-1,11)
plt.ylim(-91,91)
plt.xlabel('QC Category')
plt.ylabel('Latitude')
locs = ax.get_xticks().tolist()
# Ensure locs are -1 to 12, every integer
locs = np.arange(-1,12,1.)
ax.set_xticks(locs)
labels=[x.get_text() for x in ax.get_xticklabels()]
labels[1] = 'trk'
labels[2] = 'ATbud'
labels[3] = 'ATclim'
labels[4] = 'ATround'
labels[5] = 'ATrep'
labels[6] = 'DPTbud'
labels[7] = 'DPTclim'
labels[8] = 'DPTssat'
labels[9] = 'DPTround'
labels[10] = 'DPTrep'
labels[11] = 'DPTrepsat'
ax.set_xticklabels(labels,rotation='vertical')
gotBADs = np.where((QCtrk == 1) | (QCATbud == 1) | (QCATclim == 1) | (QCATrep == 1) | (QCDPTbud == 1) | (QCDPTclim == 1) | (QCDPTrep == 1) | (QCDPTssat == 1) | (QCDPTrepsat == 1))[0]
got0s = np.where(QCtrk == 1)[0]
got1s = np.where(QCATbud == 1)[0]
got2s = np.where(QCATclim == 1)[0]
got3s = np.where(QCATround == 1)[0]
got4s = np.where(QCATrep == 1)[0]
got5s = np.where(QCDPTbud == 1)[0]
got6s = np.where(QCDPTclim == 1)[0]
got7s = np.where(QCDPTssat == 1)[0]
got8s = np.where(QCDPTround == 1)[0]
got9s = np.where(QCDPTrep == 1)[0]
got10s = np.where(QCDPTrepsat == 1)[0]
got0spct = 0.
got1spct = 0.
got2spct = 0.
got3spct = 0.
got4spct = 0.
got5spct = 0.
got6spct = 0.
got7spct = 0.
got8spct = 0.
got9spct = 0.
got10spct = 0.
gotspct = 0.
if (nobs > 0):
gotspct = (len(gotBADs)/float(nobs))*100
if (len(got0s) > 0):
got0spct = (len(got0s)/float(nobs))*100
plt.scatter(np.repeat(0,len(got0s)),LATbun[got0s],c='hotpink',marker='o',linewidth=0.,s=12)
if (len(got1s) > 0):
got1spct = (len(got1s)/float(nobs))*100
plt.scatter(np.repeat(1,len(got1s)),LATbun[got1s],c='deeppink',marker='o',linewidth=0.,s=12)
if (len(got2s) > 0):
got2spct = (len(got2s)/float(nobs))*100
plt.scatter(np.repeat(2,len(got2s)),LATbun[got2s],c='red',marker='o',linewidth=0.,s=12)
if (len(got3s) > 0):
got3spct = (len(got3s)/float(nobs))*100
plt.scatter(np.repeat(3,len(got3s)),LATbun[got3s],c='darkorange',marker='o',linewidth=0.,s=12)
if (len(got4s) > 0):
got4spct = (len(got4s)/float(nobs))*100
plt.scatter(np.repeat(4,len(got4s)),LATbun[got4s],c='gold',marker='o',linewidth=0.,s=12)
if (len(got5s) > 0):
got5spct = (len(got5s)/float(nobs))*100
plt.scatter(np.repeat(5,len(got5s)),LATbun[got5s],c='grey',marker='o',linewidth=0.,s=12)
if (len(got6s) > 0):
got6spct = (len(got6s)/float(nobs))*100
plt.scatter(np.repeat(6,len(got6s)),LATbun[got6s],c='limegreen',marker='o',linewidth=0.,s=12)
if (len(got7s) > 0):
got7spct = (len(got7s)/float(nobs))*100
plt.scatter(np.repeat(7,len(got7s)),LATbun[got7s],c='violet',marker='o',linewidth=0.,s=12)
if (len(got8s) > 0):
got8spct = (len(got8s)/float(nobs))*100
plt.scatter(np.repeat(8,len(got8s)),LATbun[got8s],c='olivedrab',marker='o',linewidth=0.,s=12)
if (len(got9s) > 0):
got9spct = (len(got9s)/float(nobs))*100
plt.scatter(np.repeat(9,len(got9s)),LATbun[got9s],c='blue',marker='o',linewidth=0.,s=12)
if (len(got10s) > 0):
got10spct = (len(got10s)/float(nobs))*100
plt.scatter(np.repeat(10,len(got10s)),LATbun[got10s],c='indigo',marker='o',linewidth=0.,s=12)
# DAY
DgotBADs = np.where((QCtrk[DayPts] == 1) | (QCATbud[DayPts] == 1) | (QCATclim[DayPts] == 1) | (QCATrep[DayPts] == 1) | (QCDPTbud[DayPts] == 1) | (QCDPTclim[DayPts] == 1) | (QCDPTrep[DayPts] == 1) | (QCDPTssat[DayPts] == 1) | (QCDPTrepsat[DayPts] == 1))[0]
Dgot0s = np.where(QCtrk[DayPts] == 1)[0]
Dgot1s = np.where(QCATbud[DayPts] == 1)[0]
Dgot2s = np.where(QCATclim[DayPts] == 1)[0]
Dgot3s = np.where(QCATround[DayPts] == 1)[0]
Dgot4s = np.where(QCATrep[DayPts] == 1)[0]
Dgot5s = np.where(QCDPTbud[DayPts] == 1)[0]
Dgot6s = np.where(QCDPTclim[DayPts] == 1)[0]
Dgot7s = np.where(QCDPTssat[DayPts] == 1)[0]
Dgot8s = np.where(QCDPTround[DayPts] == 1)[0]
Dgot9s = np.where(QCDPTrep[DayPts] == 1)[0]
Dgot10s = np.where(QCDPTrepsat[DayPts] == 1)[0]
Dgot0spct = 0.
Dgot1spct = 0.
Dgot2spct = 0.
Dgot3spct = 0.
Dgot4spct = 0.
Dgot5spct = 0.
Dgot6spct = 0.
Dgot7spct = 0.
Dgot8spct = 0.
Dgot9spct = 0.
Dgot10spct = 0.
Dgotspct = 0.
if (Dnobs > 0):
Dgotspct = (len(DgotBADs)/float(Dnobs))*100
if (len(Dgot0s) > 0):
Dgot0spct = (len(Dgot0s)/float(Dnobs))*100
plt.scatter(np.repeat(-0.2,len(Dgot0s)),LATbun[DayPts[Dgot0s]],c='hotpink',marker='o',linewidth=0.,s=12)
if (len(Dgot1s) > 0):
Dgot1spct = (len(Dgot1s)/float(Dnobs))*100
plt.scatter(np.repeat(0.8,len(Dgot1s)),LATbun[DayPts[Dgot1s]],c='deeppink',marker='o',linewidth=0.,s=12)
if (len(Dgot2s) > 0):
Dgot2spct = (len(Dgot2s)/float(Dnobs))*100
plt.scatter(np.repeat(1.8,len(Dgot2s)),LATbun[DayPts[Dgot2s]],c='red',marker='o',linewidth=0.,s=12)
if (len(Dgot3s) > 0):
Dgot3spct = (len(Dgot3s)/float(Dnobs))*100
plt.scatter(np.repeat(2.8,len(Dgot3s)),LATbun[DayPts[Dgot3s]],c='darkorange',marker='o',linewidth=0.,s=12)
if (len(Dgot4s) > 0):
Dgot4spct = (len(Dgot4s)/float(Dnobs))*100
plt.scatter(np.repeat(3.8,len(Dgot4s)),LATbun[DayPts[Dgot4s]],c='gold',marker='o',linewidth=0.,s=12)
if (len(Dgot5s) > 0):
Dgot5spct = (len(Dgot5s)/float(Dnobs))*100
plt.scatter(np.repeat(4.8,len(Dgot5s)),LATbun[DayPts[Dgot5s]],c='grey',marker='o',linewidth=0.,s=12)
if (len(Dgot6s) > 0):
Dgot6spct = (len(Dgot6s)/float(Dnobs))*100
plt.scatter(np.repeat(5.8,len(Dgot6s)),LATbun[DayPts[Dgot6s]],c='limegreen',marker='o',linewidth=0.,s=12)
if (len(Dgot7s) > 0):
Dgot7spct = (len(Dgot7s)/float(Dnobs))*100
plt.scatter(np.repeat(6.8,len(Dgot7s)),LATbun[DayPts[Dgot7s]],c='violet',marker='o',linewidth=0.,s=12)
if (len(Dgot8s) > 0):
Dgot8spct = (len(Dgot8s)/float(Dnobs))*100
plt.scatter(np.repeat(7.8,len(Dgot8s)),LATbun[DayPts[Dgot8s]],c='olivedrab',marker='o',linewidth=0.,s=12)
if (len(Dgot9s) > 0):
Dgot9spct = (len(Dgot9s)/float(Dnobs))*100
plt.scatter(np.repeat(8.8,len(Dgot9s)),LATbun[DayPts[Dgot9s]],c='blue',marker='o',linewidth=0.,s=12)
if (len(Dgot10s) > 0):
Dgot10spct = (len(Dgot10s)/float(Dnobs))*100
plt.scatter(np.repeat(9.8,len(Dgot10s)),LATbun[DayPts[Dgot10s]],c='indigo',marker='o',linewidth=0.,s=12)
#NIGHT
NgotBADs = np.where((QCtrk[NightPts] == 1) | (QCATbud[NightPts] == 1) | (QCATclim[NightPts] == 1) | (QCATrep[NightPts] == 1) | (QCDPTbud[NightPts] == 1) | (QCDPTclim[NightPts] == 1) | (QCDPTrep[NightPts] == 1) | (QCDPTssat[NightPts] == 1) | (QCDPTrepsat[NightPts] == 1))[0]
Ngot0s = np.where(QCtrk[NightPts] == 1)[0]
Ngot1s = np.where(QCATbud[NightPts] == 1)[0]
Ngot2s = np.where(QCATclim[NightPts] == 1)[0]
Ngot3s = np.where(QCATround[NightPts] == 1)[0]
Ngot4s = np.where(QCATrep[NightPts] == 1)[0]
Ngot5s = np.where(QCDPTbud[NightPts] == 1)[0]
Ngot6s = np.where(QCDPTclim[NightPts] == 1)[0]
Ngot7s = np.where(QCDPTssat[NightPts] == 1)[0]
Ngot8s = np.where(QCDPTround[NightPts] == 1)[0]
Ngot9s = np.where(QCDPTrep[NightPts] == 1)[0]
Ngot10s = np.where(QCDPTrepsat[NightPts] == 1)[0]
Ngot0spct = 0.
Ngot1spct = 0.
Ngot2spct = 0.
Ngot3spct = 0.
Ngot4spct = 0.
Ngot5spct = 0.
Ngot6spct = 0.
Ngot7spct = 0.
Ngot8spct = 0.
Ngot9spct = 0.
Ngot10spct = 0.
Ngotspct = 0.
if (Nnobs > 0):
Ngotspct = (len(NgotBADs)/float(Nnobs))*100
if (len(Ngot0s) > 0):
Ngot0spct = (len(Ngot0s)/float(Nnobs))*100
plt.scatter(np.repeat(0.2,len(Ngot0s)),LATbun[NightPts[Ngot0s]],c='hotpink',marker='o',linewidth=0.,s=12)
if (len(Ngot1s) > 0):
Ngot1spct = (len(Ngot1s)/float(Nnobs))*100
plt.scatter(np.repeat(1.2,len(Ngot1s)),LATbun[NightPts[Ngot1s]],c='deeppink',marker='o',linewidth=0.,s=12)
if (len(Ngot2s) > 0):
Ngot2spct = (len(Ngot2s)/float(Nnobs))*100
plt.scatter(np.repeat(2.2,len(Ngot2s)),LATbun[NightPts[Ngot2s]],c='red',marker='o',linewidth=0.,s=12)
if (len(Ngot3s) > 0):
Ngot3spct = (len(Ngot3s)/float(Nnobs))*100
plt.scatter(np.repeat(3.2,len(Ngot3s)),LATbun[NightPts[Ngot3s]],c='darkorange',marker='o',linewidth=0.,s=12)
if (len(Ngot4s) > 0):
Ngot4spct = (len(Ngot4s)/float(Nnobs))*100
plt.scatter(np.repeat(4.2,len(Ngot4s)),LATbun[NightPts[Ngot4s]],c='gold',marker='o',linewidth=0.,s=12)
if (len(Ngot5s) > 0):
Ngot5spct = (len(Ngot5s)/float(Nnobs))*100
plt.scatter(np.repeat(5.2,len(Ngot5s)),LATbun[NightPts[Ngot5s]],c='grey',marker='o',linewidth=0.,s=12)
if (len(Ngot6s) > 0):
Ngot6spct = (len(Ngot6s)/float(Nnobs))*100
plt.scatter(np.repeat(6.2,len(Ngot6s)),LATbun[NightPts[Ngot6s]],c='limegreen',marker='o',linewidth=0.,s=12)
if (len(Ngot7s) > 0):
Ngot7spct = (len(Ngot7s)/float(Nnobs))*100
plt.scatter(np.repeat(7.2,len(Ngot7s)),LATbun[NightPts[Ngot7s]],c='violet',marker='o',linewidth=0.,s=12)
if (len(Ngot8s) > 0):
Ngot8spct = (len(Ngot8s)/float(Nnobs))*100
plt.scatter(np.repeat(8.2,len(Ngot8s)),LATbun[NightPts[Ngot8s]],c='olivedrab',marker='o',linewidth=0.,s=12)
if (len(Ngot9s) > 0):
Ngot9spct = (len(Ngot9s)/float(Nnobs))*100
plt.scatter(np.repeat(9.2,len(Ngot9s)),LATbun[NightPts[Ngot9s]],c='blue',marker='o',linewidth=0.,s=12)
if (len(Ngot10s) > 0):
Ngot10spct = (len(Ngot10s)/float(Nnobs))*100
plt.scatter(np.repeat(10.2,len(Ngot10s)),LATbun[NightPts[Ngot10s]],c='indigo',marker='o',linewidth=0.,s=12)
plt.annotate('BADs: '+str(len(gotBADs))+' ('+"{:5.2f}".format(gotspct)+'%), '+str(len(DgotBADs))+' ('+"{:5.2f}".format(Dgotspct)+'%), '+str(len(NgotBADs))+' ('+"{:5.2f}".format(Ngotspct)+'%)',xy=(0.01,1.26),xycoords='axes fraction',size=7,color='black')
plt.annotate('trk: '+str(len(got0s))+' ('+"{:5.2f}".format(got0spct)+'%), '+str(len(Dgot0s))+' ('+"{:5.2f}".format(Dgot0spct)+'%), '+str(len(Ngot0s))+' ('+"{:5.2f}".format(Ngot0spct)+'%)',xy=(0.01,1.22),xycoords='axes fraction',size=7,color='hotpink')
plt.annotate('ATbud: '+str(len(got1s))+' ('+"{:5.2f}".format(got1spct)+'%), '+str(len(Dgot1s))+' ('+"{:5.2f}".format(Dgot1spct)+'%), '+str(len(Ngot1s))+' ('+"{:5.2f}".format(Ngot1spct)+'%)',xy=(0.01,1.18),xycoords='axes fraction',size=7,color='deeppink')
plt.annotate('ATclim: '+str(len(got2s))+' ('+"{:5.2f}".format(got2spct)+'%), '+str(len(Dgot2s))+' ('+"{:5.2f}".format(Dgot2spct)+'%), '+str(len(Ngot2s))+' ('+"{:5.2f}".format(Ngot2spct)+'%)',xy=(0.01,1.14),xycoords='axes fraction',size=7,color='red')
plt.annotate('ATround: '+str(len(got3s))+' ('+"{:5.2f}".format(got3spct)+'%), '+str(len(Dgot3s))+' ('+"{:5.2f}".format(Dgot3spct)+'%), '+str(len(Ngot3s))+' ('+"{:5.2f}".format(Ngot3spct)+'%)',xy=(0.01,1.10),xycoords='axes fraction',size=7,color='darkorange')
plt.annotate('ATrep: '+str(len(got4s))+' ('+"{:5.2f}".format(got4spct)+'%), '+str(len(Dgot4s))+' ('+"{:5.2f}".format(Dgot4spct)+'%), '+str(len(Ngot4s))+' ('+"{:5.2f}".format(Ngot4spct)+'%)',xy=(0.01,1.06),xycoords='axes fraction',size=7,color='gold')
plt.annotate('DPTbud: '+str(len(got5s))+' ('+"{:5.2f}".format(got5spct)+'%), '+str(len(Dgot5s))+' ('+"{:5.2f}".format(Dgot5spct)+'%), '+str(len(Ngot5s))+' ('+"{:5.2f}".format(Ngot5spct)+'%)',xy=(0.01,1.02),xycoords='axes fraction',size=7,color='grey')
plt.annotate('DPTclim: '+str(len(got6s))+' ('+"{:5.2f}".format(got6spct)+'%), '+str(len(Dgot6s))+' ('+"{:5.2f}".format(Dgot6spct)+'%), '+str(len(Ngot6s))+' ('+"{:5.2f}".format(Ngot6spct)+'%)',xy=(0.51,1.22),xycoords='axes fraction',size=7,color='limegreen')
plt.annotate('DPTssat: '+str(len(got7s))+' ('+"{:5.2f}".format(got7spct)+'%), '+str(len(Dgot7s))+' ('+"{:5.2f}".format(Dgot7spct)+'%), '+str(len(Ngot7s))+' ('+"{:5.2f}".format(Ngot7spct)+'%)',xy=(0.51,1.18),xycoords='axes fraction',size=7,color='violet')
plt.annotate('DPTround: '+str(len(got8s))+' ('+"{:5.2f}".format(got8spct)+'%), '+str(len(Dgot8s))+' ('+"{:5.2f}".format(Dgot8spct)+'%), '+str(len(Ngot8s))+' ('+"{:5.2f}".format(Ngot8spct)+'%)',xy=(0.51,1.14),xycoords='axes fraction',size=7,color='olivedrab')
plt.annotate('DPTrep: '+str(len(got9s))+' ('+"{:5.2f}".format(got9spct)+'%), '+str(len(Dgot9s))+' ('+"{:5.2f}".format(Dgot9spct)+'%), '+str(len(Ngot9s))+' ('+"{:5.2f}".format(Ngot9spct)+'%)',xy=(0.51,1.10),xycoords='axes fraction',size=7,color='blue')
plt.annotate('DPTrepsat: '+str(len(got10s))+' ('+"{:5.2f}".format(got10spct)+'%), '+str(len(Dgot10s))+' ('+"{:5.2f}".format(Dgot10spct)+'%), '+str(len(Ngot10s))+' ('+"{:5.2f}".format(Ngot10spct)+'%)',xy=(0.51,1.06),xycoords='axes fraction',size=7,color='indigo')
#plt.tight_layout()
# plt.savefig(OUTDIR+OutQCFil+".eps")
plt.savefig(OUTDIR+OutQCFil+".png")
# Write out stats to file (append!)
filee=open(OUTDIR+OutQCText,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(nobs)+\
' BADs: '+'{:8d}'.format(len(gotBADs))+' ('+"{:5.2f}".format(gotspct)+\
'%) trk: '+'{:8d}'.format(len(got0s))+' ('+"{:5.2f}".format(got0spct)+\
'%) ATbud: '+'{:8d}'.format(len(got1s))+' ('+"{:5.2f}".format(got1spct)+\
'%) ATclim: '+'{:8d}'.format(len(got2s))+' ('+"{:5.2f}".format(got2spct)+\
'%) ATround: '+'{:8d}'.format(len(got3s))+' ('+"{:5.2f}".format(got3spct)+\
'%) ATrep: '+'{:8d}'.format(len(got4s))+' ('+"{:5.2f}".format(got4spct)+\
'%) DPTbud: '+'{:8d}'.format(len(got5s))+' ('+"{:5.2f}".format(got5spct)+\
'%) DPTclim: '+'{:8d}'.format(len(got6s))+' ('+"{:5.2f}".format(got6spct)+\
'%) DPTssat: '+'{:8d}'.format(len(got7s))+' ('+"{:5.2f}".format(got7spct)+\
'%) DPTround: '+'{:8d}'.format(len(got8s))+' ('+"{:5.2f}".format(got8spct)+\
'%) DPTrep: '+'{:8d}'.format(len(got9s))+' ('+"{:5.2f}".format(got9spct)+\
'%) DPTrepsat: '+'{:8d}'.format(len(got10s))+' ('+"{:5.2f}".format(got10spct)+\
'%)\n'))
filee.close()
filee=open(OUTDIR+OutQCTextD,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(Dnobs)+\
' BADs: '+'{:8d}'.format(len(DgotBADs))+' ('+"{:5.2f}".format(Dgotspct)+\
'%) trk: '+'{:8d}'.format(len(Dgot0s))+' ('+"{:5.2f}".format(Dgot0spct)+\
'%) ATbud: '+'{:8d}'.format(len(Dgot1s))+' ('+"{:5.2f}".format(Dgot1spct)+\
'%) ATclim: '+'{:8d}'.format(len(Dgot2s))+' ('+"{:5.2f}".format(Dgot2spct)+\
'%) ATround: '+'{:8d}'.format(len(Dgot3s))+' ('+"{:5.2f}".format(Dgot3spct)+\
'%) ATrep: '+'{:8d}'.format(len(Dgot4s))+' ('+"{:5.2f}".format(Dgot4spct)+\
'%) DPTbud: '+'{:8d}'.format(len(Dgot5s))+' ('+"{:5.2f}".format(Dgot5spct)+\
'%) DPTclim: '+'{:8d}'.format(len(Dgot6s))+' ('+"{:5.2f}".format(Dgot6spct)+\
'%) DPTssat: '+'{:8d}'.format(len(Dgot7s))+' ('+"{:5.2f}".format(Dgot7spct)+\
'%) DPTround: '+'{:8d}'.format(len(Dgot8s))+' ('+"{:5.2f}".format(Dgot8spct)+\
'%) DPTrep: '+'{:8d}'.format(len(Dgot9s))+' ('+"{:5.2f}".format(Dgot9spct)+\
'%) DPTrepsat: '+'{:8d}'.format(len(Dgot10s))+' ('+"{:5.2f}".format(Dgot10spct)+\
'%)\n'))
filee.close()
filee=open(OUTDIR+OutQCTextN,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(Nnobs)+\
' BADs: '+'{:8d}'.format(len(NgotBADs))+' ('+"{:5.2f}".format(Ngotspct)+\
'%) trk: '+'{:8d}'.format(len(Ngot0s))+' ('+"{:5.2f}".format(Ngot0spct)+\
'%) ATbud: '+'{:8d}'.format(len(Ngot1s))+' ('+"{:5.2f}".format(Ngot1spct)+\
'%) ATclim: '+'{:8d}'.format(len(Ngot2s))+' ('+"{:5.2f}".format(Ngot2spct)+\
'%) ATround: '+'{:8d}'.format(len(Ngot3s))+' ('+"{:5.2f}".format(Ngot3spct)+\
'%) ATrep: '+'{:8d}'.format(len(Ngot4s))+' ('+"{:5.2f}".format(Ngot4spct)+\
'%) DPTbud: '+'{:8d}'.format(len(Ngot5s))+' ('+"{:5.2f}".format(Ngot5spct)+\
'%) DPTclim: '+'{:8d}'.format(len(Ngot6s))+' ('+"{:5.2f}".format(Ngot6spct)+\
'%) DPTssat: '+'{:8d}'.format(len(Ngot7s))+' ('+"{:5.2f}".format(Ngot7spct)+\
'%) DPTround: '+'{:8d}'.format(len(Ngot8s))+' ('+"{:5.2f}".format(Ngot8spct)+\
'%) DPTrep: '+'{:8d}'.format(len(Ngot9s))+' ('+"{:5.2f}".format(Ngot9spct)+\
'%) DPTrepsat: '+'{:8d}'.format(len(Ngot10s))+' ('+"{:5.2f}".format(Ngot10spct)+\
'%)\n'))
filee.close()
#pdb.set_trace()
if __name__ == '__main__':
main(sys.argv[1:])
#************************************************************************
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpautomation.heat.engine.resources import gbpresource
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from neutronclient.common.exceptions import NeutronClientException
class PolicyTarget(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, POLICY_TARGET_GROUP_ID,
PORT_ID
) = (
'tenant_id', 'name', 'description', 'policy_target_group_id',
'port_id'
)
ATTRIBUTES = (
PORT_ID_ATTR
) = (
'port_id'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the policy target.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the policy target.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the policy target.'),
update_allowed=True
),
POLICY_TARGET_GROUP_ID: properties.Schema(
properties.Schema.STRING,
_('Policy target group id of the policy target.'),
required=True,
update_allowed=True
),
PORT_ID: properties.Schema(
properties.Schema.STRING,
_('Neutron port id of the policy target.'),
update_allowed=False
)
}
attributes_schema = {
PORT_ID_ATTR: attributes.Schema(
_('Neutron port id of this policy target.')
)
}
def _show_resource(self):
client = self.grouppolicy()
pt_id = self.resource_id
return client.show_policy_target(pt_id)['policy_target']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
pt = client.create_policy_target(
{'policy_target': props})['policy_target']
self.resource_id_set(pt['id'])
def handle_delete(self):
client = self.grouppolicy()
pt_id = self.resource_id
try:
client.delete_policy_target(pt_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_policy_target(
self.resource_id, {'policy_target': prop_diff})
class PolicyTargetGroup(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, L2_POLICY_ID, PROVIDED_POLICY_RULE_SETS,
CONSUMED_POLICY_RULE_SETS, NETWORK_SERVICE_POLICY_ID, SHARED
) = (
'tenant_id', 'name', 'description', 'l2_policy_id',
'provided_policy_rule_sets', 'consumed_policy_rule_sets',
'network_service_policy_id', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the policy target group.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the policy target group.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the policy target group.'),
update_allowed=True
),
L2_POLICY_ID: properties.Schema(
properties.Schema.STRING,
_('L2 policy id of the policy target group.'),
update_allowed=True
),
PROVIDED_POLICY_RULE_SETS: properties.Schema(
properties.Schema.LIST,
_('Provided policy rule set for the policy target group.'),
update_allowed=True
),
CONSUMED_POLICY_RULE_SETS: properties.Schema(
properties.Schema.LIST,
_('Consumed policy rule set for the policy target group.'),
update_allowed=True
),
NETWORK_SERVICE_POLICY_ID: properties.Schema(
properties.Schema.STRING,
_('Network service policy id of the policy target group.'),
update_allowed=True, default=None
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
ptg_id = self.resource_id
return client.show_policy_target_group(ptg_id)['policy_target_group']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
provided_policy_rule_set_list = {}
consumed_policy_rule_set_list = {}
props_provided_policy_rule_sets = props.get(
'provided_policy_rule_sets', [])
props_consumed_policy_rule_sets = props.get(
'consumed_policy_rule_sets', [])
for prop_prov_policy_rule_set in props_provided_policy_rule_sets:
policy_rule_set_id = (
prop_prov_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_prov_policy_rule_set['policy_rule_set_scope'])
provided_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
for prop_cons_policy_rule_set in props_consumed_policy_rule_sets:
policy_rule_set_id = (
prop_cons_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_cons_policy_rule_set['policy_rule_set_scope'])
consumed_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
if provided_policy_rule_set_list:
props['provided_policy_rule_sets'] = provided_policy_rule_set_list
if consumed_policy_rule_set_list:
props['consumed_policy_rule_sets'] = consumed_policy_rule_set_list
ptg = client.create_policy_target_group(
{'policy_target_group': props})['policy_target_group']
self.resource_id_set(ptg['id'])
def handle_delete(self):
client = self.grouppolicy()
ptg_id = self.resource_id
try:
client.delete_policy_target_group(ptg_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
provided_policy_rule_set_list = {}
consumed_policy_rule_set_list = {}
props_provided_policy_rule_sets = prop_diff.get(
'provided_policy_rule_sets', [])
props_consumed_policy_rule_sets = prop_diff.get(
'consumed_policy_rule_sets', [])
for prop_prov_policy_rule_set in props_provided_policy_rule_sets:
policy_rule_set_id = (
prop_prov_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_prov_policy_rule_set['policy_rule_set_scope'])
provided_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
for prop_cons_policy_rule_set in props_consumed_policy_rule_sets:
policy_rule_set_id = (
prop_cons_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_cons_policy_rule_set['policy_rule_set_scope'])
consumed_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
if provided_policy_rule_set_list:
prop_diff['provided_policy_rule_sets'] = (
provided_policy_rule_set_list)
if consumed_policy_rule_set_list:
prop_diff['consumed_policy_rule_sets'] = (
consumed_policy_rule_set_list)
self.grouppolicy().update_policy_target_group(
self.resource_id, {'policy_target_group': prop_diff})
class L2Policy(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, L3_POLICY_ID, SHARED
) = (
'tenant_id', 'name', 'description', 'l3_policy_id', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the L2 policy.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the L2 policy.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the L2 policy.'),
update_allowed=True
),
L3_POLICY_ID: properties.Schema(
properties.Schema.STRING,
_('L3 policy id associated with l2 policy.'),
required=True,
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
l2_policy_id = self.resource_id
return client.show_l2_policy(l2_policy_id)['l2_policy']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
l2_policy = client.create_l2_policy(
{'l2_policy': props})['l2_policy']
self.resource_id_set(l2_policy['id'])
def handle_delete(self):
client = self.grouppolicy()
l2_policy_id = self.resource_id
try:
client.delete_l2_policy(l2_policy_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_l2_policy(
self.resource_id, {'l2_policy': prop_diff})
class L3Policy(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, IP_VERSION, IP_POOL,
SUBNET_PREFIX_LENGTH, EXTERNAL_SEGMENTS, SHARED
) = (
'tenant_id', 'name', 'description', 'ip_version', 'ip_pool',
'subnet_prefix_length', 'external_segments', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the L3 policy.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the L3 policy.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the L3 policy.'),
update_allowed=True
),
IP_VERSION: properties.Schema(
properties.Schema.STRING,
_('IP version of the L3 policy.'),
update_allowed=False
),
IP_POOL: properties.Schema(
properties.Schema.STRING,
_('IP pool of the L3 policy.'),
update_allowed=False
),
SUBNET_PREFIX_LENGTH: properties.Schema(
properties.Schema.INTEGER,
_('Subnet prefix length of L3 policy.'),
update_allowed=True
),
EXTERNAL_SEGMENTS: properties.Schema(
properties.Schema.LIST,
_('External segments for L3 policy.'),
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
l3_policy_id = self.resource_id
return client.show_l3_policy(l3_policy_id)['l3_policy']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
external_segments_dict = {}
props_external_segments = props.get(
'external_segments', [])
for prop_external_segment in props_external_segments:
external_segment_id = (
prop_external_segment['external_segment_id'])
allocated_address = (
prop_external_segment['allocated_address'])
external_segments_dict.update({external_segment_id:
allocated_address})
if external_segments_dict:
props['external_segments'] = external_segments_dict
l3_policy = client.create_l3_policy(
{'l3_policy': props})['l3_policy']
self.resource_id_set(l3_policy['id'])
def handle_delete(self):
client = self.grouppolicy()
l3_policy_id = self.resource_id
try:
client.delete_l3_policy(l3_policy_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
external_segments_dict = {}
props_external_segments = prop_diff.get(
'external_segments', [])
for prop_external_segment in props_external_segments:
external_segment_id = (
prop_external_segment['external_segment_id'])
allocated_address = (
prop_external_segment['allocated_address'])
external_segments_dict.update({external_segment_id:
allocated_address})
if external_segments_dict:
prop_diff['external_segments'] = external_segments_dict
self.grouppolicy().update_l3_policy(
self.resource_id, {'l3_policy': prop_diff})
class PolicyClassifier(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, PROTOCOL, PORT_RANGE,
DIRECTION, SHARED
) = (
'tenant_id', 'name', 'description', 'protocol', 'port_range',
'direction', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the policy classifier.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the policy classifier.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the policy classifier.'),
update_allowed=True
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol of traffic described by the policy classifier.'),
constraints=[
constraints.AllowedValues(['tcp', 'udp', 'icmp', None])
],
update_allowed=True
),
PORT_RANGE: properties.Schema(
properties.Schema.STRING,
_('Port range of traffic described by the policy classifier.'),
update_allowed=True
),
DIRECTION: properties.Schema(
properties.Schema.STRING,
_('Direction of traffic described by the policy classifier.'),
constraints=[
constraints.AllowedValues(['in', 'out', 'bi', None])
],
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
pc_id = self.resource_id
return client.show_policy_classifier(pc_id)['policy_classifier']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
policy_classifier = client.create_policy_classifier(
{'policy_classifier': props})['policy_classifier']
self.resource_id_set(policy_classifier['id'])
def handle_delete(self):
client = self.grouppolicy()
pc_id = self.resource_id
try:
client.delete_policy_classifier(pc_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_policy_classifier(
self.resource_id, {'policy_classifier': prop_diff})
class PolicyAction(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, ACTION_TYPE, ACTION_VALUE, SHARED
) = (
'tenant_id', 'name', 'description', 'action_type', 'action_value',
'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the action.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the action.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the action.'),
update_allowed=True
),
ACTION_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of action.'),
constraints=[
constraints.AllowedValues(['allow', 'redirect', None])
],
update_allowed=True
),
ACTION_VALUE: properties.Schema(
properties.Schema.STRING,
_('Value of the action.'),
update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
action_id = self.resource_id
return client.show_policy_action(action_id)['policy_action']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
policy_action = client.create_policy_action(
{'policy_action': props})['policy_action']
self.resource_id_set(policy_action['id'])
def handle_delete(self):
client = self.grouppolicy()
action_id = self.resource_id
try:
client.delete_policy_action(action_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_policy_action(
self.resource_id, {'policy_action': prop_diff})
class PolicyRule(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, ENABLED, POLICY_CLASSIFIER_ID,
POLICY_ACTIONS, SHARED
) = (
'tenant_id', 'name', 'description', 'enabled', 'policy_classifier_id',
'policy_actions', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the policy rule.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the policy rule.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the policy rule.'),
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('State of policy rule.'),
default=True, update_allowed=True
),
POLICY_CLASSIFIER_ID: properties.Schema(
properties.Schema.STRING,
_('Classifier id of the policy rule.'),
required=True, update_allowed=True
),
POLICY_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('List of actions of the policy rule.'),
default=None, update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
rule_id = self.resource_id
return client.show_policy_rule(rule_id)['policy_rule']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
policy_rule = client.create_policy_rule(
{'policy_rule': props})['policy_rule']
self.resource_id_set(policy_rule['id'])
def handle_delete(self):
client = self.grouppolicy()
rule_id = self.resource_id
try:
client.delete_policy_rule(rule_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_policy_rule(
self.resource_id, {'policy_rule': prop_diff})
class PolicyRuleSet(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, PARENT_ID, CHILD_POLICY_RULE_SETS,
POLICY_RULES, SHARED
) = (
'tenant_id', 'name', 'description', 'parent_id',
'child_policy_rule_sets', 'policy_rules', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the policy rule set.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the policy rule set.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the policy rule set.'),
update_allowed=True
),
PARENT_ID: properties.Schema(
properties.Schema.STRING,
_('Parent id of the policy rule set.'),
update_allowed=False
),
CHILD_POLICY_RULE_SETS: properties.Schema(
properties.Schema.LIST,
_('List of child policy rule sets.'),
default=None, update_allowed=True
),
POLICY_RULES: properties.Schema(
properties.Schema.LIST,
_('List of policy rules.'),
default=None, update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
prs_id = self.resource_id
return client.show_policy_rule_set(prs_id)['policy_rule_set']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
policy_rule_set = client.create_policy_rule_set(
{'policy_rule_set': props})['policy_rule_set']
self.resource_id_set(policy_rule_set['id'])
def handle_delete(self):
client = self.grouppolicy()
policy_rule_set_id = self.resource_id
try:
client.delete_policy_rule_set(policy_rule_set_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_policy_rule_set(
self.resource_id, {'policy_rule_set': prop_diff})
class NetworkServicePolicy(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, NETWORK_SERVICE_PARAMS, SHARED
) = (
'tenant_id', 'name', 'description', 'network_service_params', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the network service policy.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the network service policy.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the network service policy.'),
update_allowed=True
),
NETWORK_SERVICE_PARAMS: properties.Schema(
properties.Schema.LIST,
_('List of network service policy dicts.'),
default=None, update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
nsp_id = self.resource_id
nsp = client.show_network_service_policy(nsp_id)
return nsp['network_service_policy']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
nsp = client.create_network_service_policy(
{'network_service_policy': props})['network_service_policy']
self.resource_id_set(nsp['id'])
def handle_delete(self):
client = self.grouppolicy()
nsp_id = self.resource_id
try:
client.delete_network_service_policy(nsp_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_network_service_policy(
self.resource_id, {'network_service_policy': prop_diff})
class ExternalPolicy(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, EXTERNAL_SEGMENTS,
PROVIDED_POLICY_RULE_SETS, CONSUMED_POLICY_RULE_SETS, SHARED
) = (
'tenant_id', 'name', 'description', 'external_segments',
'provided_policy_rule_sets', 'consumed_policy_rule_sets', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the external policy.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the external policy.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the external policy.'),
update_allowed=True
),
EXTERNAL_SEGMENTS: properties.Schema(
properties.Schema.LIST,
_('External segments of the policy.'),
update_allowed=True
),
PROVIDED_POLICY_RULE_SETS: properties.Schema(
properties.Schema.LIST,
_('Provided policy rule sets.'),
default=None, update_allowed=True
),
CONSUMED_POLICY_RULE_SETS: properties.Schema(
properties.Schema.LIST,
_('Consumed policy rule sets.'),
default=None, update_allowed=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
ext_policy_id = self.resource_id
ext_policy = client.show_external_policy(ext_policy_id)
return ext_policy['external_policy']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
provided_policy_rule_set_list = {}
consumed_policy_rule_set_list = {}
props_provided_policy_rule_sets = props.get(
'provided_policy_rule_sets', [])
props_consumed_policy_rule_sets = props.get(
'consumed_policy_rule_sets', [])
for prop_prov_policy_rule_set in props_provided_policy_rule_sets:
policy_rule_set_id = (
prop_prov_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_prov_policy_rule_set['policy_rule_set_scope'])
provided_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
for prop_cons_policy_rule_set in props_consumed_policy_rule_sets:
policy_rule_set_id = (
prop_cons_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_cons_policy_rule_set['policy_rule_set_scope'])
consumed_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
if provided_policy_rule_set_list:
props['provided_policy_rule_sets'] = provided_policy_rule_set_list
if consumed_policy_rule_set_list:
props['consumed_policy_rule_sets'] = consumed_policy_rule_set_list
ext_policy = client.create_external_policy(
{'external_policy': props})['external_policy']
self.resource_id_set(ext_policy['id'])
def handle_delete(self):
client = self.grouppolicy()
ext_policy_id = self.resource_id
try:
client.delete_external_policy(ext_policy_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
provided_policy_rule_set_list = {}
consumed_policy_rule_set_list = {}
props_provided_policy_rule_sets = prop_diff.get(
'provided_policy_rule_sets', [])
props_consumed_policy_rule_sets = prop_diff.get(
'consumed_policy_rule_sets', [])
for prop_prov_policy_rule_set in props_provided_policy_rule_sets:
policy_rule_set_id = (
prop_prov_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_prov_policy_rule_set['policy_rule_set_scope'])
provided_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
for prop_cons_policy_rule_set in props_consumed_policy_rule_sets:
policy_rule_set_id = (
prop_cons_policy_rule_set['policy_rule_set_id'])
policy_rule_set_scope = (
prop_cons_policy_rule_set['policy_rule_set_scope'])
consumed_policy_rule_set_list.update({policy_rule_set_id:
policy_rule_set_scope})
if provided_policy_rule_set_list:
prop_diff['provided_policy_rule_sets'] = (
provided_policy_rule_set_list)
if consumed_policy_rule_set_list:
prop_diff['consumed_policy_rule_sets'] = (
consumed_policy_rule_set_list)
self.grouppolicy().update_external_policy(
self.resource_id, {'external_policy': prop_diff})
class ExternalSegment(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, IP_VERSION, CIDR, SUBNET_ID,
EXTERNAL_ROUTES, PORT_ADDRESS_TRANSLATION, SHARED
) = (
'tenant_id', 'name', 'description', 'ip_version', 'cidr',
'subnet_id', 'external_routes', 'port_address_translation',
'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the external segment.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the external segment.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the external segment.'),
update_allowed=True
),
IP_VERSION: properties.Schema(
properties.Schema.STRING,
_('IP version of the external segment.'),
default='4', update_allowed=False
),
CIDR: properties.Schema(
properties.Schema.STRING,
_('CIDR of the external segment.'),
default=None, update_allowed=False
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
_('Subnet ID of the neutron external network.'),
default=None, update_allowed=False
),
EXTERNAL_ROUTES: properties.Schema(
properties.Schema.LIST,
_('External routes of the external segment.'),
default=None, update_allowed=True
),
PORT_ADDRESS_TRANSLATION: properties.Schema(
properties.Schema.BOOLEAN,
_('Port address translation required for the external segment.'),
update_allowed=True, default=False
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
es_id = self.resource_id
es = client.show_external_segment(es_id)
return es['external_segment']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
es = client.create_external_segment(
{'external_segment': props})['external_segment']
self.resource_id_set(es['id'])
def handle_delete(self):
client = self.grouppolicy()
es_id = self.resource_id
try:
client.delete_external_segment(es_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_external_segment(
self.resource_id, {'external_segment': prop_diff})
class NATPool(gbpresource.GBPResource):
PROPERTIES = (
TENANT_ID, NAME, DESCRIPTION, IP_VERSION, IP_POOL,
EXTERNAL_SEGMENT_ID, SHARED
) = (
'tenant_id', 'name', 'description', 'ip_version', 'ip_pool',
'external_segment_id', 'shared'
)
properties_schema = {
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant id of the NAT pool.')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the NAT pool.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the NAT pool.'),
update_allowed=True
),
IP_VERSION: properties.Schema(
properties.Schema.STRING,
_('IP version of the NAT pool.'),
default='4', update_allowed=False
),
IP_POOL: properties.Schema(
properties.Schema.STRING,
_('IP pool of the NAT pool.'),
default=None, update_allowed=False
),
EXTERNAL_SEGMENT_ID: properties.Schema(
properties.Schema.STRING,
_('External segment id of the NAT pool.'),
update_allowed=True, required=True
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Shared.'),
update_allowed=True, required=True
)
}
def _show_resource(self):
client = self.grouppolicy()
nat_pool_id = self.resource_id
nat_pool = client.show_nat_pool(nat_pool_id)
return nat_pool['nat_pool']
def handle_create(self):
client = self.grouppolicy()
props = {}
for key in self.properties:
if self.properties.get(key) is not None:
props[key] = self.properties.get(key)
nat_pool = client.create_nat_pool(
{'nat_pool': props})['nat_pool']
self.resource_id_set(nat_pool['id'])
def handle_delete(self):
client = self.grouppolicy()
nat_pool_id = self.resource_id
try:
client.delete_nat_pool(nat_pool_id)
except NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.grouppolicy().update_nat_pool(
self.resource_id, {'nat_pool': prop_diff})
def resource_mapping():
return {
'OS::GroupBasedPolicy::PolicyTarget': PolicyTarget,
'OS::GroupBasedPolicy::PolicyTargetGroup': PolicyTargetGroup,
'OS::GroupBasedPolicy::L2Policy': L2Policy,
'OS::GroupBasedPolicy::L3Policy': L3Policy,
'OS::GroupBasedPolicy::PolicyClassifier': PolicyClassifier,
'OS::GroupBasedPolicy::PolicyAction': PolicyAction,
'OS::GroupBasedPolicy::PolicyRule': PolicyRule,
'OS::GroupBasedPolicy::PolicyRuleSet': PolicyRuleSet,
'OS::GroupBasedPolicy::NetworkServicePolicy': NetworkServicePolicy,
'OS::GroupBasedPolicy::ExternalPolicy': ExternalPolicy,
'OS::GroupBasedPolicy::ExternalSegment': ExternalSegment,
'OS::GroupBasedPolicy::NATPool': NATPool
}
|
|
import collections
import xml.etree.ElementTree as ET
import fiona.crs
import geopandas as gpd
from pandas.io.common import urlopen, urlencode
import pandas as pd
from shapely.geometry import Point, LineString
from six import string_types
OSMData = collections.namedtuple('OSMData', ('nodes', 'waynodes', 'waytags',
'relmembers', 'reltags'))
_crs = fiona.crs.from_epsg(4326)
# Tags to remove so we don't clobber the output. This list comes from
# osmtogeojson's index.js (https://github.com/tyrasd/osmtogeojson)
uninteresting_tags = set([
"source",
"source_ref",
"source:ref",
"history",
"attribution",
"created_by",
"tiger:county",
"tiger:tlid",
"tiger:upload_uuid",
])
# http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
def query_osm(typ, bbox=None, recurse=None, tags='', raw=False,
meta=False, **kwargs):
"""
Query the Overpass API to obtain OpenStreetMap data.
See also:
http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
The OSM XML data is parsed into an intermediate set of DataFrames.
By passing in 'render=False', this will return these DataFrames stored
as the OSMData namedtuple. If render is True, then the DataFrames
are built into their corresponding geometries.
Parameters
----------
typ : {'node', 'way', 'relation'}
The type of OSM data to query
bbox : shapely Polygon describing the area of interest
Optional polygon used to restrict the query. Unless the query
is extremely restricted, you usually want to specify this.
The current implementation builds an Overpass 'poly' filter from
'bbox.exterior.coords'; the commented-out branch in _build_url keeps
the earlier (min lon, min lat, max lon, max lat) tuple form, which can
be retrieved from GeoPandas objects as 'df.total_bounds' or from
Shapely objects as 'geom.bounds'
recurse : {'up', 'down', 'uprel', 'downrel'}
This is used to get more data than the original query. If 'typ' is
'way', you'll usually want this set to 'down' which grabs all nodes
of the matching ways
tags : string or list of query strings
See also the OverpassQL (referenced above) for more tag options
Examples:
tags='highway'
Matches objects with a 'highway' tag
tags='highway=motorway'
Matches objects where the 'highway' tag is 'motorway'
tags='name~[Mm]agazine'
Match if the 'name' tag matches the regular expression
Specify a list of tag requests to match all of them
tags=['highway', 'name~"^Magazine"']
Match tags that have 'highway' and where 'name' starts
with 'Magazine'
raw : boolean, default False
Return the raw XML data returned by the request
render : boolean, default True
Parse the output and return a final GeoDataFrame
meta : boolean, default False
Indicates whether to query the metadata with each OSM object. This
includes the changeset, timestamp, uid, user, and version.
Returns
-------
df - GeoDataFrame
Note that there's probably a bit more filtering required to get the
exact desired data. For example if you only want ways, you may want
to grab only the linestrings like:
>>> df = df[df.type == 'LineString']
"""
url = _build_url(typ, bbox, recurse, tags, meta)
# TODO: Raise on non-200 (or 400-599)
with urlopen(url) as response:
content = response.read()
if raw:
return content
return read_osm(content, **kwargs)
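# Usage sketch (illustrative only -- the bounding polygon and tag filter below are
# hypothetical and not part of this module):
#   from shapely.geometry import box
#   area = box(-71.12, 42.35, -71.06, 42.38)   # query_osm currently expects a polygon for bbox
#   ways = query_osm('way', bbox=area, recurse='down', tags='highway')
#   roads = ways[ways.type == 'LineString']    # keep only the rendered way geometries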
def _build_url(typ, bbox=None, recurse=None, tags='', meta=False):
recurse_map = {
'up': '<',
'uprel': '<<',
'down': '>',
'downrel': '>>',
}
if recurse is None:
recursestr = ''
else:
try:
recursestr = recurse_map[recurse]
except KeyError:
raise ValueError("Unrecognized recurse value '{}'. "
"Must be one of: {}."
.format(recurse, ', '.join(recurse_map.keys())))
# Allow tags to be a single string
if isinstance(tags, string_types) and tags:
tags = [tags]
queries = ''.join('[{}]'.format(t) for t in tags)
# Overpass QL takes the bounding box as
# (min latitude, min longitude, max latitude, max longitude)
if bbox is None:
bboxstr = ''
else:
#bboxstr = "({})".format(
#','.join(str(b) for b in (bbox[1], bbox[0], bbox[3], bbox[2])))
bboxstr = '(poly:"{}")'.format(
' '.join('{c[1]} {c[0]}'.format(c=c) for c in bbox.exterior.coords))
if meta:
metastr = 'meta'
else:
metastr = ''
query = '({typ}{bbox}{queries};{recurse});out {meta};'.format(
typ=typ, bbox=bboxstr, queries=queries, recurse=recursestr, meta=metastr)
url = ''.join(['http://www.overpass-api.de/api/interpreter?',
urlencode({'data': query})])
return url
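# Worked example (hypothetical call) of what _build_url assembles:
#   _build_url('way', bbox=None, recurse='down', tags='highway')
# yields the Overpass QL statement
#   (way[highway];>);out ;
# which is URL-encoded as the 'data' parameter of
#   http://www.overpass-api.de/api/interpreter?data=...
# When a bounding polygon is supplied, a '(poly:"lat lon lat lon ...")' filter is
# appended to the 'way' clause instead of the empty bbox string above.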
def read_osm(content, render=True, **kwargs):
"""
Parse OSM XML data and store as several DataFrames. Optionally "render"
the DataFrames to GeoDataFrames.
"""
doc = ET.fromstring(content)
nodes = read_nodes(doc)
waynodes, waytags = read_ways(doc)
relmembers, reltags = read_relations(doc)
data = OSMData(nodes, waynodes, waytags, relmembers, reltags)
if render:
data = render_to_gdf(data, **kwargs)
return data
def read_nodes(doc):
# Example:
# <node id="1705717514" lat="42.3630798" lon="-71.0997601">
# <tag k="crossing" v="zebra"/>
# <tag k="highway" v="crossing"/>
# <tag k="source" v="Bing"/>
# </node>
nodes = [_element_to_dict(xmlnode) for xmlnode in doc.findall('node')]
nodes = _dict_to_dataframe(nodes)
nodes['lon'] = nodes['lon'].astype(float)
nodes['lat'] = nodes['lat'].astype(float)
return nodes
def _element_to_dict(element):
d = element.attrib.copy()
for t in element.findall('tag'):
k = t.attrib['k']
if k not in uninteresting_tags:
d[k] = t.attrib['v']
return d
def _dict_to_dataframe(d):
df = pd.DataFrame.from_dict(d)
if 'timestamp' in df:
df['timestamp'] = pd.to_datetime(df['timestamp'])
return df
def read_ways(doc):
# Example:
# <way id="8614593">
# <nd ref="61326730"/>
# <nd ref="61326036"/>
# <nd ref="61321194"/>
# <tag k="attribution" v="Office of Geographic and Environmental Information (MassGIS)"/>
# <tag k="condition" v="fair"/>
# <tag k="created_by" v="JOSM"/>
# <tag k="highway" v="residential"/>
# <tag k="lanes" v="2"/>
# <tag k="massgis:way_id" v="171099"/>
# <tag k="name" v="Centre Street"/>
# <tag k="source" v="massgis_import_v0.1_20071008165629"/>
# <tag k="width" v="13.4"/>
# </way>
waytags = []
waynodes = []
for xmlway in doc.findall('way'):
wayid = xmlway.attrib['id']
for i, xmlnd in enumerate(xmlway.findall('nd')):
d = xmlnd.attrib.copy()
d['id'] = wayid
d['index'] = i
waynodes.append(d)
tags = _element_to_dict(xmlway)
waytags.append(tags)
waynodes = _dict_to_dataframe(waynodes)
waytags = _dict_to_dataframe(waytags)
return waynodes, waytags
def read_relations(doc):
# Example:
# <relation id="1933745">
# <member type="way" ref="134055159" role="outer"/>
# <member type="way" ref="260533047" role="outer"/>
# <member type="way" ref="142867799" role="outer"/>
# <member type="way" ref="134063352" role="outer"/>
# <member type="way" ref="142803038" role="outer"/>
# <member type="way" ref="134056144" role="outer"/>
# <member type="way" ref="134056141" role="outer"/>
# <tag k="admin_level" v="8"/>
# <tag k="boundary" v="administrative"/>
# <tag k="name" v="Cambridge"/>
# <tag k="type" v="boundary"/>
# <tag k="wikipedia" v="en:Cambridge, Massachusetts"/>
# </relation>
reltags = []
relmembers = []
for xmlrel in doc.findall('relation'):
relid = xmlrel.attrib['id']
for i, xmlmember in enumerate(xmlrel.findall('member')):
d = xmlmember.attrib.copy()
d['id'] = relid
d['index'] = i
relmembers.append(d)
tags = _element_to_dict(xmlrel)
reltags.append(tags)
relmembers = _dict_to_dataframe(relmembers)
reltags = _dict_to_dataframe(reltags)
return relmembers, reltags
def render_to_gdf(osmdata, drop_untagged=True):
nodes = render_nodes(osmdata.nodes, drop_untagged)
ways = render_ways(osmdata.nodes, osmdata.waynodes, osmdata.waytags)
if ways is not None:
# We should get append working
nodes = nodes.append(ways).set_geometry('geometry', crs=_crs)
return nodes
def render_nodes(nodes, drop_untagged=True):
# Drop nodes that have no tags, convert lon/lat to points
if drop_untagged:
nodes = nodes.dropna(subset=nodes.columns.drop(['id', 'lon', 'lat']),
how='all')
points = [Point(x['lon'], x['lat']) for i, x in nodes.iterrows()]
nodes = nodes.drop(['lon', 'lat'], axis=1)
nodes = nodes.set_geometry(points, crs=_crs)
return nodes
def render_ways(nodes, waynodes, waytags):
if waynodes is None or waynodes.empty:
return None
node_points = nodes[['id', 'lon', 'lat']]
def wayline(df):
df = df.sort_index(by='index')[['lon', 'lat']]
return LineString(df.values)
# Group the ways and create a LineString for each one. way_lines is a
# Series where the index is the way id and the value is the LineString.
# Merge it with the waytags to get a single GeoDataFrame of ways
waynodes = waynodes.merge(node_points, left_on='ref', right_on='id',
suffixes=('', '_nodes'))
way_lines = waynodes.groupby('id').apply(wayline)
ways = waytags.set_index('id').set_geometry(way_lines, crs=_crs)
ways.reset_index(inplace=True)
return ways
|
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Check in on the status of this machine's MegaRAID array and log information
to the M&C system.
We assume that there is one controller with one big old RAID array.
This script must be run as root since that's what the storcli64 command line
client requires.
"""
import datetime
import dateutil.tz
import errno
import json
import socket
from subprocess import Popen, PIPE
import sys
from astropy.time import Time, TimeDelta
from hera_mc import mc
# Preliminaries
#
# This script runs as root on netboot machines where the majority of the
# filesystem is readonly. So `event_ticker` is hardcoded to point to somewhere
# where we can reliably save state.
#
# If `num_recent_events` gets too big, the relevant storcli call can start taking
# incredibly long to complete.
storcli = '/opt/MegaRAID/storcli/storcli64'
event_ticker = '/home/obs/.hera_mc/megaraid_last_event_id_%s.txt' % (socket.gethostname(), )
controller = 0
num_recent_events = 32 # if more than this many events occur between runs, some won't get logged
hostname = socket.gethostname()
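# For illustration, on a host named 'still1' (hypothetical) event_ticker expands to
# /home/obs/.hera_mc/megaraid_last_event_id_still1.txt; it stores the seqNum of the
# last event already logged to M&C so that reruns only report newer events.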
show_all_items = [
'Controller Status',
'Memory Correctable Errors',
'Memory Uncorrectable Errors',
'BBU Status',
'Physical Drives',
]
event_header_keep_keys = frozenset(['Code', 'Class', 'Locale', 'Event Description'])
_months = {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}
def parse_storcli_datetime(text):
"""Example input text: "Sat May 20 00:16:57 2017". Returns an Astropy Time
object. The time reported by storcli is in the system local time (and not,
say, UTC).
"""
_, month, day, hhmmss, year = text.split()
month = _months[month]
day = int(day)
year = int(year)
hour, minute, second = [int(s) for s in hhmmss.split(':')]
local_tz = dateutil.tz.tzlocal()
t = datetime.datetime(year, month, day, hour, minute, second, tzinfo=local_tz)
return Time(t) # auto-converts to UTC timescale
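# Illustrative example (hypothetical timestamp): parse_storcli_datetime('Sat May 20 00:16:57 2017')
# interprets the string in the system's local timezone and returns an astropy Time that
# prints in UTC -- e.g. a host running at UTC+2 would get 2017-05-19T22:16:57.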
# Before running anything, make sure we can connect to the DB.
parser = mc.get_mc_argument_parser()
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
# Get the seqNum of the last event that we've noticed
try:
ticker_file = open(event_ticker, 'r')
except IOError as e:
if e.errno != errno.ENOENT:
raise
last_seen_seqnum = 0
else:
last_seen_seqnum = int(ticker_file.readline())
ticker_file.close()
# Parse '/c0 show all'
show_all = Popen([storcli, '/c%d' % controller, 'show', 'all'],
shell=False, stdout=PIPE)
item_values = {}
for line in show_all.stdout:
line = line.decode("utf-8")
for item in show_all_items:
if line.startswith(item):
item_values[item] = line.split('=', 1)[1].strip()
break
# This is not at all scalable, but ... meh. We're looking for:
#
# ----------------------------------------------------------------
# DG/VD TYPE State Access Consist Cache Cac sCC Size Name
# ----------------------------------------------------------------
# 0/0 RAID60 Optl RW Yes RWBD - ON 196.475 TB
# ----------------------------------------------------------------
if line.startswith('0/0'):
item_values['VD 0/0 State'] = line.split()[2]
if show_all.wait() != 0:
print('error: storcli exited with an error code', file=sys.stderr)
print('unfortunately this script may have swallowed its error message', file=sys.stderr)
sys.exit(1)
num_disks = int(item_values.pop('Physical Drives', 0))
status_info = json.dumps(item_values, sort_keys=True)
# Parse the recent events
event_log = Popen([storcli, '/c%d' % controller, 'show', 'events',
'type=latest=%d' % num_recent_events, 'filter=warning,critical,fatal'],
shell=False, stdout=PIPE)
events = []
NOT_IN_EVENT, IN_EVENT = 0, 1
state = NOT_IN_EVENT
seq_num = None
cur_event_data = {}
for line in event_log.stdout:
line = line.decode("utf-8")
if state == NOT_IN_EVENT:
if line.startswith('seqNum:'):
# The extra 0 arg here means to guess the numeric base;
# seqnum is in hex with a 0x prefix.
seq_num = int(line.split(':', 1)[1].strip(), 0)
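# e.g. (illustrative) int('0x1a', 0) == 26 -- the base is inferred from the '0x' prefix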
state = IN_EVENT
elif state == IN_EVENT:
line = line.strip()
if not len(line):
continue
if line.startswith('======='):
continue
if line.startswith('Event Data:'): # just a separator
continue
if line == 'None': # appears for events with no data after the ====== divider
continue
if line.startswith('seqNum:'): # new event, finishing old one
if seq_num is not None:
events.append((seq_num, cur_event_data))
seq_num = None
cur_event_data = {}
seq_num = int(line.split(':', 1)[1].strip(), 0)
state = IN_EVENT
continue
if line.startswith('Controller ='): # we've reached the footer
if seq_num is not None:
events.append((seq_num, cur_event_data))
seq_num = None
cur_event_data = {}
state = NOT_IN_EVENT
continue
try:
key, value = line.split(':', 1)
except ValueError:
print('severe: unexpected event data line: %r' % (line,))
else:
cur_event_data[key] = value.strip()
if event_log.wait() != 0:
print('error: storcli exited with an error code', file=sys.stderr)
print('unfortunately this script may have swallowed its error message', file=sys.stderr)
sys.exit(1)
if seq_num is not None:
events.append((seq_num, cur_event_data))
# Now actually check in with the database
now = Time.now()
biggest_seqnum = last_seen_seqnum
with db.sessionmaker() as session:
session.add_lib_raid_status(now, hostname, num_disks, status_info)
for seqnum, data in events:
if seqnum <= last_seen_seqnum:
continue
biggest_seqnum = max(biggest_seqnum, seqnum)
# Once the controller makes contact with the OS,
# it reports event times in the local time using the 'Time'
# key. But on boot, it doesn't know the time and can
# only report a delta against boot.
abs_time_str = data.pop('Time', None)
if abs_time_str is not None:
time = parse_storcli_datetime(abs_time_str)
else:
boot_rel_time = int(data.pop('Seconds since last reboot'))
import psutil
boot = datetime.datetime.fromtimestamp(psutil.boot_time())
delta = TimeDelta(boot_rel_time, format='sec')
time = Time(boot) + delta
disk = data.pop('Device ID', '?')
data['seqNum'] = seqnum
info = json.dumps(data)
session.add_lib_raid_error(time, hostname, disk, info)
session.commit()
# Remember the biggest seqnum that we've seen.
with open(event_ticker, 'w') as f:
print(biggest_seqnum, file=f)
|
|
###############################################################################
##
## Copyright (C) 2012-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['Client']
try:
import ssl
_HAS_SSL = True
except ImportError:
_HAS_SSL = False
import sys
_HAS_SSL_CLIENT_CONTEXT = sys.version_info >= (2,7,9)
import json
import hmac
import hashlib
import base64
import random
from datetime import datetime
from urllib import parse
from http.client import HTTPConnection, HTTPSConnection
def _utcnow():
"""
Get current time in UTC as ISO 8601 string.
:returns str -- Current time as string in ISO 8601 format.
"""
now = datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
def _parse_url(url):
"""
Parses a Crossbar.io HTTP bridge URL.
"""
parsed = parse.urlparse(url)
if parsed.scheme not in ["http", "https"]:
raise Exception("invalid Push URL scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "http":
port = 80
elif parsed.scheme == "https":
port = 443
else:
raise Exception("logic error")
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid Push URL: non-empty fragment '%s" % parsed.fragment)
if parsed.query is not None and parsed.query != "":
raise Exception("invalid Push URL: non-empty query string '%s" % parsed.query)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = parse.unquote(ppath)
else:
ppath = "/"
path = ppath
return {'secure': parsed.scheme == "https",
'host': parsed.hostname,
'port': port,
'path': path}
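# Illustrative note (the URL matches the example used in the Client docstring):
# _parse_url("http://example.com:8080/push") returns
# {'secure': False, 'host': 'example.com', 'port': 8080, 'path': '/push'}.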
class Client:
"""
Crossbar.io HTTP bridge client.
"""
def __init__(self, url, key = None, secret = None, timeout = 5, context = None):
"""
Create a new Crossbar.io push client.
The only mandatory argument is the Push service endpoint of the Crossbar.io
instance to push to.
For signed pushes, provide authentication key and secret. If those are not
given, unsigned pushes are performed.
:param url: URL of the HTTP bridge of Crossbar.io (e.g. http://example.com:8080/push).
:type url: str
:param key: Optional key to use for signing requests.
:type key: str
        :param secret: When using signed requests, the secret corresponding to the key.
:type secret: str
:param timeout: Timeout for requests.
:type timeout: int
:param context: If the HTTP bridge is running on HTTPS (that is securely over TLS),
then the context provides the SSL settings the client should use (e.g. the
certificate chain against which to verify the server certificate). This parameter
is only available on Python 2.7.9+ and Python 3 (otherwise the parameter is silently
ignored!). See: https://docs.python.org/2/library/ssl.html#ssl.SSLContext
:type context: obj or None
"""
assert(type(url) == str)
assert((key and secret) or (not key and not secret))
assert(key is None or type(key) == str)
assert(secret is None or type(secret) == str)
assert(type(timeout) == int)
if _HAS_SSL and _HAS_SSL_CLIENT_CONTEXT:
assert(context is None or isinstance(context, ssl.SSLContext))
self._seq = 1
self._key = key
self._secret = secret
self._endpoint = _parse_url(url)
self._endpoint['headers'] = {
"Content-type": "application/json",
"User-agent": "crossbarconnect-python"
}
if self._endpoint['secure']:
if not _HAS_SSL:
raise Exception("Bridge URL is using HTTPS, but Python SSL module is missing")
if _HAS_SSL_CLIENT_CONTEXT:
self._connection = HTTPSConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout, context = context)
else:
self._connection = HTTPSConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout)
else:
self._connection = HTTPConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout)
def publish(self, topic, *args, **kwargs):
"""
Publish an event to subscribers on specified topic via Crossbar.io HTTP bridge.
The event payload (positional and keyword) can be of any type that can be
serialized to JSON.
If `kwargs` contains an `options` attribute, this is expected to
be a dictionary with the following possible parameters:
* `exclude`: A list of WAMP session IDs to exclude from receivers.
* `eligible`: A list of WAMP session IDs eligible as receivers.
:param topic: Topic to push to.
:type topic: str
:param args: Arbitrary application payload for the event (positional arguments).
:type args: list
:param kwargs: Arbitrary application payload for the event (keyword arguments).
:type kwargs: dict
:returns int -- The event publication ID assigned by the broker.
"""
assert(type(topic) == str)
## this will get filled and later serialized into HTTP/POST body
##
event = {
'topic': topic
}
if 'options' in kwargs:
event['options'] = kwargs.pop('options')
assert(type(event['options']) == dict)
if args:
event['args'] = args
if kwargs:
event['kwargs'] = kwargs
try:
body = json.dumps(event, separators = (',',':'))
body = body.encode('utf8')
except Exception as e:
raise Exception("invalid event payload - not JSON serializable: {0}".format(e))
params = {
'timestamp': _utcnow(),
'seq': self._seq,
}
if self._key:
## if the request is to be signed, create extra fields and signature
params['key'] = self._key
params['nonce'] = random.randint(0, 9007199254740992)
# HMAC[SHA256]_{secret} (key | timestamp | seq | nonce | body) => signature
hm = hmac.new(self._secret.encode('utf8'), None, hashlib.sha256)
hm.update(params['key'].encode('utf8'))
hm.update(params['timestamp'].encode('utf8'))
hm.update("{0}".format(params['seq']).encode('utf8'))
hm.update("{0}".format(params['nonce']).encode('utf8'))
hm.update(body)
            # decode so urlencode() below receives text rather than a bytes repr
            signature = base64.urlsafe_b64encode(hm.digest()).decode('ascii')
params['signature'] = signature
self._seq += 1
path = "{0}?{1}".format(parse.quote(self._endpoint['path']), parse.urlencode(params))
## now issue the HTTP/POST
##
self._connection.request('POST', path, body, self._endpoint['headers'])
response = self._connection.getresponse()
response_body = response.read()
if response.status != 202:
raise Exception("publication request failed {0} [{1}] - {2}".format(response.status, response.reason, response_body))
try:
res = json.loads(response_body)
except Exception as e:
raise Exception("publication request bogus result - {0}".format(e))
return res['id']
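# Hedged usage sketch (not part of the library): the bridge URL, topic, key and
# secret below are placeholder assumptions, not values from the Crossbar.io docs.
if __name__ == '__main__':
    client = Client("http://127.0.0.1:8080/push",
                    key="myapp", secret="not-a-real-secret")
    # positional and keyword arguments become the event payload; 'options'
    # is popped out of kwargs and passed through as publish options.
    publication_id = client.publish("com.example.topic",
                                    "hello", count=1,
                                    options={'exclude': []})
    print("event published with id {0}".format(publication_id))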
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pycket.error import SchemeException
from pycket import values
path_home_dir = values.W_Symbol.make("home-dir")
path_pref_dir = values.W_Symbol.make("pref-dir")
path_pref_file = values.W_Symbol.make("pref-file")
path_temp_dir = values.W_Symbol.make("temp-dir")
path_init_dir = values.W_Symbol.make("init-dir")
path_init_file = values.W_Symbol.make("init-file")
path_config_dir = values.W_Symbol.make("config-dir")
path_host_config_dir = values.W_Symbol.make("host-config-dir")
path_addon_dir = values.W_Symbol.make("addon-dir")
path_doc_dir = values.W_Symbol.make("doc-dir")
path_desk_dir = values.W_Symbol.make("desk-dir")
path_sys_dir = values.W_Symbol.make("sys-dir")
path_exec_file = values.W_Symbol.make("exec-file")
path_run_file = values.W_Symbol.make("run-file")
path_collects_dir = values.W_Symbol.make("collects-dir")
path_host_collects_dir = values.W_Symbol.make("host-collects-dir")
path_orig_dir = values.W_Symbol.make("orig-dir")
path_kinds = [path_home_dir,
path_pref_dir,
path_pref_file,
path_temp_dir,
path_init_dir,
path_init_file,
path_config_dir,
path_host_config_dir,
path_addon_dir,
path_doc_dir,
path_desk_dir,
path_sys_dir,
path_exec_file,
path_run_file,
path_collects_dir,
path_host_collects_dir,
path_orig_dir
]
class RacketPaths(object):
def __init__(self):
self.paths = {}
self.initialized = False
def get_path(self, kind):
if not self.initialized:
self.initialize_paths()
if kind not in self.paths:
raise SchemeException("Path cannot be found for : %s" % kind.tostring())
return self.paths[kind]
def set_path(self, kind, path):
if kind not in path_kinds:
raise SchemeException("Possibly trying to set a wrong kind of system-path : %s" % kind.tostring())
self.paths[kind] = path
def initialize_paths(self):
from pycket.util import os_get_env_var, os_check_env_var
# FIXME : check absolute/relative paths
# Environment Variables
if not os_check_env_var("PLTHOME") and not os_check_env_var("PLTCOLLECTS"):
raise SchemeException("In order to locate the Racket installation, Pycket requires a `PLTHOME` environment variable to point to Racket directory. If Racket is installed in Unix-style, then you can just set a `PLTCOLLECTS` variable to point to the Racket `collects`.")
PLTHOME = os_get_env_var("PLTHOME")
PLTCOLLECTS = os_get_env_var("PLTCOLLECTS")
PLTEXECFILE = os_get_env_var("PLTEXECFILE")
PLTUSERHOME = os_get_env_var("PLTUSERHOME")
HOME = os_get_env_var("HOME")
USER = os_get_env_var("USER")
LOGNAME = os_get_env_var("LOGNAME")
TMPDIR = os_get_env_var("TMPDIR")
PLTCONFIGDIR = os_get_env_var("PLTCONFIGDIR")
PLTADDONDIR = os_get_env_var("PLTADDONDIR")
CURRENT_DIR = os.getcwd()
#############
# HOME
#############
W_PATH_HOME_DIR = ""
if PLTUSERHOME:
W_PATH_HOME_DIR = PLTUSERHOME
elif HOME:
W_PATH_HOME_DIR = HOME
elif USER:
W_PATH_HOME_DIR = USER
elif LOGNAME:
W_PATH_HOME_DIR = LOGNAME
self.paths[path_home_dir] = values.W_Path(W_PATH_HOME_DIR)
#############
# PREF-DIR
#############
W_PATH_PREF_DIR = ""
if W_PATH_HOME_DIR:
W_PATH_PREF_DIR = os.path.join(W_PATH_HOME_DIR, ".racket")
self.paths[path_pref_dir] = values.W_Path(W_PATH_PREF_DIR)
#############
# PREF-FILE
#############
W_PATH_PREF_FILE = ""
if W_PATH_PREF_DIR:
W_PATH_PREF_FILE = os.path.join(W_PATH_PREF_DIR, "racket-prefs.rktd")
self.paths[path_pref_file] = values.W_Path(W_PATH_PREF_FILE)
#############
# TEMP-DIR
#############
W_PATH_TEMP_DIR = ""
if TMPDIR:
W_PATH_TEMP_DIR = TMPDIR
elif os.path.exists("/var/tmp"):
W_PATH_TEMP_DIR = "/var/tmp"
elif os.path.exists("/usr/tmp"):
W_PATH_TEMP_DIR = "/usr/tmp"
elif os.path.exists("/tmp"):
W_PATH_TEMP_DIR = "/tmp"
self.paths[path_temp_dir] = values.W_Path(W_PATH_TEMP_DIR)
#############
# INIT-DIR
#############
W_PATH_INIT_DIR = W_PATH_HOME_DIR
self.paths[path_init_dir] = values.W_Path(W_PATH_INIT_DIR)
#############
# INIT-FILE -- startup file
#
# Unix and Mac OS: ".racketrc"
# Windows: "racketrc.rktl"
#############
W_PATH_INIT_FILE = ""
if W_PATH_INIT_DIR:
W_PATH_INIT_FILE = os.path.join(W_PATH_INIT_DIR, ".racketrc")
self.paths[path_init_file] = values.W_Path(W_PATH_INIT_FILE)
#############
# CONFIG-DIR
# defaults to an "etc" directory relative to the current executable
# It might not exist
#############
W_PATH_CONFIG_DIR = ""
if PLTCONFIGDIR:
W_PATH_CONFIG_DIR = PLTCONFIGDIR
else:
if PLTHOME:
W_PATH_CONFIG_DIR = os.path.join(PLTHOME, os.path.join("racket", "etc"))
else:
W_PATH_CONFIG_DIR = os.path.join(CURRENT_DIR, "etc")
if path_config_dir not in self.paths:
self.paths[path_config_dir] = values.W_Path(W_PATH_CONFIG_DIR)
#############
# HOST-CONFIG-DIR
#############
W_PATH_HOST_CONFIG_DIR = W_PATH_CONFIG_DIR
self.paths[path_host_config_dir] = values.W_Path(W_PATH_HOST_CONFIG_DIR)
#############
# ADDON-DIR
#############
W_PATH_ADDON_DIR = W_PATH_PREF_DIR
if PLTADDONDIR:
W_PATH_ADDON_DIR = PLTADDONDIR
if path_addon_dir not in self.paths:
self.paths[path_addon_dir] = values.W_Path(W_PATH_ADDON_DIR)
#############
# DOC-DIR
#############
W_PATH_DOC_DIR = W_PATH_HOME_DIR
self.paths[path_doc_dir] = values.W_Path(W_PATH_DOC_DIR)
#############
# SYS-DIR
#############
W_PATH_SYS_DIR = "/"
self.paths[path_sys_dir] = values.W_Path(W_PATH_SYS_DIR)
#############
# EXEC-FILE
#############
# FIXME : get argv[0] from target args
if PLTHOME:
# assume the binary is at $PLTHOME/racket/bin/racket
W_PATH_EXEC_FILE = os.path.join(PLTHOME, os.path.join("racket", os.path.join("bin", "racket")))
elif PLTEXECFILE:
# expect PLTEXECFILE
W_PATH_EXEC_FILE = PLTEXECFILE
else:
# should we error?
# set it to pycket-c for now
W_PATH_EXEC_FILE = os.path.join(CURRENT_DIR, "pycket-c")
self.paths[path_exec_file] = values.W_Path(W_PATH_EXEC_FILE)
#############
# RUN-FILE
#############
W_PATH_RUN_FILE = W_PATH_EXEC_FILE
if path_run_file not in self.paths:
self.paths[path_run_file] = values.W_Path(W_PATH_RUN_FILE)
#############
# COLLECTS-DIR
#############
if PLTHOME:
W_PATH_COLLECTS_DIR = os.path.join(PLTHOME, os.path.join("racket", "collects"))
else:
W_PATH_COLLECTS_DIR = PLTCOLLECTS
if path_collects_dir not in self.paths:
self.paths[path_collects_dir] = values.W_Path(W_PATH_COLLECTS_DIR)
#############
# HOST-COLLECTS-DIR
#############
W_PATH_HOST_COLLECTS_DIR = W_PATH_COLLECTS_DIR
self.paths[path_host_collects_dir] = values.W_Path(W_PATH_HOST_COLLECTS_DIR)
#############
# ORIG-DIR
#############
W_PATH_ORIG_DIR = CURRENT_DIR
self.paths[path_orig_dir] = values.W_Path(W_PATH_ORIG_DIR)
self.initialized = True
racket_sys_paths = RacketPaths()
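# Hedged usage sketch: get_path() lazily calls initialize_paths() on first use,
# so this only works with PLTHOME (or PLTCOLLECTS) set in the environment;
# otherwise it raises SchemeException as documented above.
if __name__ == '__main__':
    home_path = racket_sys_paths.get_path(path_home_dir)
    collects_path = racket_sys_paths.get_path(path_collects_dir)
    print(home_path)
    print(collects_path)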
|
|
# mako/codegen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
MAGIC_NUMBER = 7
def compile(node,
uri,
filename=None,
default_filters=None,
buffer_filters=None,
imports=None,
source_encoding=None,
generate_magic_comment=True,
disable_unicode=False,
strict_undefined=False):
"""Generate module source code given a parsetree node,
uri, and optional source filename"""
# if on Py2K, push the "source_encoding" string to be
# a bytestring itself, as we will be embedding it into
# the generated source and we don't want to coerce the
# result into a unicode object, in "disable_unicode" mode
if not util.py3k and isinstance(source_encoding, unicode):
source_encoding = source_encoding.encode(source_encoding)
buf = util.FastEncodingBuffer()
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer,
_CompileContext(uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined),
node)
return buf.getvalue()
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
if self.in_def:
name = "render_%s" % node.funcname
args = node.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
if node.is_block and not node.is_anonymous:
args += ['**pageargs']
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(
pagetag or node,
name, args,
buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
def write_toplevel(self):
"""Traverse a template structure for module-level directives and
generate the start of module-level code.
"""
inherit = []
namespaces = {}
module_code = []
        encoding = [None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" %
self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
self.printer.writeline("_modified_time = %r" % time.time())
self.printer.writeline(
"_template_filename = %r" % self.compiler.filename)
self.printer.writeline("_template_uri = %r" % self.compiler.uri)
self.printer.writeline(
"_source_encoding = %r" % self.compiler.source_encoding)
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(
buf,
source='', lineno=0,
pos=0,
filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = \
module_identifiers.topleveldefs.\
union(main_identifiers.topleveldefs)
module_identifiers.declared.add("UNDEFINED")
if impcode:
module_identifiers.declared.update(impcode.declared_identifiers)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %r" %
[n.name for n in
main_identifiers.topleveldefs.values()]
)
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return main_identifiers.topleveldefs.values()
def write_render_callable(self, node, name, args, buffered, filtered, cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline("@runtime._decorate_toplevel(%s)" % decorator)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" %
(node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None,None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if node.attributes.has_key('import'):
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
self.in_def = True
class NSDefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
if node.is_anonymous:
raise exceptions.CompileException(
"Can't put anonymous blocks inside <%namespace>",
**node.exception_kwargs
)
self.write_inline_def(node, identifiers, nested=False)
export.append(node.funcname)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
self.in_def = False
callable_name = "make_namespace()"
else:
callable_name = "None"
if 'file' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.TemplateNamespace(%r, context._clean_inheritance_tokens(),"
" templateuri=%s, callables=%s, calling_uri=_template_uri)" %
(
node.name,
node.parsed_attributes.get('file', 'None'),
callable_name,
)
)
elif 'module' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.ModuleNamespace(%r, context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri, module=%s)" %
(
node.name,
callable_name,
node.parsed_attributes.get('module', 'None')
)
)
else:
self.printer.writeline(
"ns = runtime.Namespace(%r, context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri)" %
(
node.name,
callable_name,
)
)
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure.
"""
# collection of all defs available to us in this scope
comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
to_write = set()
# write "context.get()" for all variables we are going to
        # need that aren't in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define
# right here
to_write = to_write.union([c.funcname for c in identifiers.closuredefs.values()])
# remove identifiers that are declared in the argument
# signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to.
# in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block
# means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
        # if a limiting set was sent, constrain to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.iteritems():
if ns.attributes.has_key('import'):
self.printer.writeline(
"_mako_get_namespace(context, %r)._populate(_import_ns, %r)" %
(
ident,
re.split(r'\s*,\s*', ns.attributes['import'])
))
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_block:
if not comp.is_anonymous:
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
else:
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline(
"%s = _mako_get_namespace(context, %r)" %
(ident, ident)
)
else:
if getattr(self.compiler, 'has_ns_imports', False):
if self.compiler.strict_undefined:
self.printer.writelines(
"%s = _import_ns.get(%r, UNDEFINED)" %
(ident, ident),
"if %s is UNDEFINED:" % ident,
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None, None
)
else:
self.printer.writeline(
"%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
(ident, ident, ident))
else:
if self.compiler.strict_undefined:
self.printer.writelines(
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None
)
else:
self.printer.writeline(
"%s = context.get(%r, UNDEFINED)" % (ident, ident)
)
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.funcname
namedecls = node.get_argument_expressions()
nameargs = node.get_argument_expressions(include_defaults=False)
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.get_argument_expressions()
decorator = node.decorator
if decorator:
self.printer.writeline("@runtime._decorate_inline(context, %s)" % decorator)
self.printer.writeline("def %s(%s):" % (node.funcname, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.funcname,
namedecls, False, identifiers,
inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
"""write the end section of a rendering function, either outermost or inline.
this takes into account if the rendering function was filtered, buffered, etc.
and closes the corresponding try: block if any, and writes code to retrieve
captured content, apply filters, send proper return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
                # implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s, False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name,
args, buffered, identifiers,
inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering
callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
cache_args = {}
if self.compiler.pagetag is not None:
cache_args.update(
(
pa[6:],
self.compiler.pagetag.parsed_attributes[pa]
)
for pa in self.compiler.pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
cache_args.update(
(
pa[6:],
node_or_pagetag.parsed_attributes[pa]
) for pa in node_or_pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
if 'timeout' in cache_args:
cache_args['timeout'] = int(eval(cache_args['timeout']))
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [
'=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
for a in args
]
self.write_variable_declares(
identifiers,
toplevel=toplevel,
limit=node_or_pagetag.undeclared_identifiers()
)
if buffered:
s = "context.get('local')."\
"cache.get_and_replace(%s, lambda:__M_%s(%s), %s__M_defname=%r)" % \
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k,v) for k, v in cache_args.items()]),
name
)
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local')."
"cache.get_and_replace(%s, lambda:__M_%s(%s), %s__M_defname=%r))" %
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k,v) for k, v in cache_args.items()]),
name,
),
"return ''",
None
)
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters
present in the given filter names, adjusting for the global
'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
elif self.compiler.disable_unicode:
return filters.NON_UNICODE_ESCAPES.get(name, name)
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or \
(
self.compiler.pagetag is not None and
len(self.compiler.pagetag.filter_args.args)
) or \
len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
if not node.get_children():
self.printer.writeline("pass")
self.printer.writeline(None)
else:
self.write_source_comment(node)
self.printer.writeline(node.text)
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" %
self.create_filter_callable(
node.filter_args.args,
"__M_buf.getvalue()",
False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally
# declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template,
# to simulate "enclosing scope"
self.printer.writeline('__M_locals_builtin_stored = __M_locals_builtin()')
self.printer.writeline(
'__M_locals.update(__M_dict_builtin([(__M_key,'
' __M_locals_builtin_stored[__M_key]) for '
'__M_key in [%s] if __M_key in __M_locals_builtin_stored]))' %
','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri, %s)" %
(node.parsed_attributes['file'], args))
else:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri)" %
(node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitBlockTag(self, node):
if node.is_anonymous:
self.printer.writeline("%s()" % node.funcname)
else:
nameargs = node.get_argument_expressions(include_defaults=False)
nameargs += ['**pageargs']
self.printer.writeline("if 'parent' not in context._data or "
"not hasattr(context._data['parent'], '%s'):"
% node.funcname)
self.printer.writeline("context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
# TODO: we can put namespace-specific checks here, such
# as ensure the given namespace will be imported,
# pre-import the namespace, etc.
self.visitCallTag(node)
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used
# for the body() function, but for other non-body()
# <%def>s within <%call> we want the current caller
# off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
if not node.is_anonymous:
export.append(node.funcname)
# remove defs that are within the <%call> from the "closuredefs" defined
                # in the body, so they don't render twice
if node.funcname in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.funcname]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify
# buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# get local reference to current caller, if any
"__M_caller = context.caller_stack._get_caller()",
# push on caller for nested call
"context.caller_stack.nextcaller = "
"runtime.Namespace('caller', context, callables=ccall(__M_caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable([], node.expression, True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, node=None, parent=None, nested=False):
if parent is not None:
# if we are the branch created in write_namespaces(),
# we don't share any context from the main body().
if isinstance(node, parsetree.NamespaceTag):
self.declared = set()
self.topleveldefs = util.SetLikeDict()
else:
# things that have already been declared
# in an enclosing namespace (i.e. names we can just use)
self.declared = set(parent.declared).\
union([c.name for c in parent.closuredefs.values()]).\
union(parent.locally_declared).\
union(parent.argument_declared)
# if these identifiers correspond to a "nested"
# scope, it means whatever the parent identifiers
# had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = set()
self.topleveldefs = util.SetLikeDict()
# things within this level that are referenced before they
# are declared (e.g. assigned to)
self.undeclared = set()
# things that are declared locally. some of these things
# could be in the "undeclared" list as well if they are
# referenced before declared
self.locally_declared = set()
# assignments made in explicit python blocks.
# these will be propagated to
# the context of local def calls.
self.locally_assigned = set()
# things that are declared in the argument
# signature of the def callable
self.argument_declared = set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with
this Identifiers as the parent."""
return _Identifiers(node, self, **kwargs)
@property
def defs(self):
return set(self.topleveldefs.union(self.closuredefs).values())
def __repr__(self):
return "Identifiers(declared=%r, locally_declared=%r, "\
"undeclared=%r, topleveldefs=%r, closuredefs=%r, argumentdeclared=%r)" %\
(
list(self.declared),
list(self.locally_declared),
list(self.undeclared),
[c.name for c in self.topleveldefs.values()],
[c.name for c in self.closuredefs.values()],
self.argument_declared)
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared
and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())
def visitNamespaceTag(self, node):
# only traverse into the sub-elements of a
# <%namespace> tag if we are the branch created in
# write_namespaces()
if self.node is node:
for n in node.nodes:
n.accept_visitor(self)
def _check_name_exists(self, collection, node):
existing = collection.get(node.funcname)
collection[node.funcname] = node
if existing is not None and \
existing is not node and \
(node.is_block or existing.is_block):
raise exceptions.CompileException(
"%%def or %%block named '%s' already "
"exists in this template." %
node.funcname, **node.exception_kwargs)
def visitDefTag(self, node):
if node.is_root() and not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitBlockTag(self, node):
if node is not self.node and \
not node.is_anonymous:
if isinstance(self.node, parsetree.DefTag):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of def '%s'"
% (node.name, self.node.name), **node.exception_kwargs)
elif isinstance(self.node, (parsetree.CallTag, parsetree.CallNamespaceTag)):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of <%%call> tag"
% (node.name, ), **node.exception_kwargs)
if not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
self.undeclared.add(node.funcname)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallNamespaceTag(self, node):
self.visitCallTag(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
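# Hedged usage sketch (not part of Mako itself): compile() above expects a
# parsetree node, which is normally produced by mako.lexer.Lexer. The template
# text, uri and default_filters values below are illustrative assumptions.
if __name__ == '__main__':
    from mako.lexer import Lexer
    node = Lexer(u"hello, ${name}!", filename='example.mako').parse()
    module_source = compile(node, uri='example.mako', filename='example.mako',
                            default_filters=['unicode'])
    print(module_source)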
|
|
# coding: utf-8
from sqlalchemy import all_
from sqlalchemy import and_
from sqlalchemy import any_
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import false
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import true
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
class IdiosyncrasyTest(fixtures.TestBase):
__only_on__ = "mysql", "mariadb"
__backend__ = True
@testing.emits_warning()
def test_is_boolean_symbols_despite_no_native(self, connection):
is_(
connection.scalar(select(cast(true().is_(true()), Boolean))),
True,
)
is_(
connection.scalar(select(cast(true().is_not(true()), Boolean))),
False,
)
is_(
connection.scalar(select(cast(false().is_(false()), Boolean))),
True,
)
class MatchTest(fixtures.TablesTest):
__only_on__ = "mysql", "mariadb"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"cattable",
metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50)),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
Table(
"matchtable",
metadata,
Column("id", Integer, primary_key=True),
Column("title", String(200)),
Column("category_id", Integer, ForeignKey("cattable.id")),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
@classmethod
def insert_data(cls, connection):
cattable, matchtable = cls.tables("cattable", "matchtable")
connection.execute(
cattable.insert(),
[
{"id": 1, "description": "Python"},
{"id": 2, "description": "Ruby"},
],
)
connection.execute(
matchtable.insert(),
[
{
"id": 1,
"title": "Agile Web Development with Ruby On Rails",
"category_id": 2,
},
{"id": 2, "title": "Dive Into Python", "category_id": 1},
{
"id": 3,
"title": "Programming Matz's Ruby",
"category_id": 2,
},
{
"id": 4,
"title": "The Definitive Guide to Django",
"category_id": 1,
},
{"id": 5, "title": "Python in a Nutshell", "category_id": 1},
],
)
def test_simple_match(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select()
.where(matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
).fetchall()
eq_([2, 5], [r.id for r in results])
def test_not_match(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select()
.where(~matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
)
eq_([1, 3, 4], [r.id for r in results])
def test_simple_match_with_apostrophe(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select().where(matchtable.c.title.match("Matz's"))
).fetchall()
eq_([3], [r.id for r in results])
def test_return_value(self, connection):
matchtable = self.tables.matchtable
# test [ticket:3263]
result = connection.execute(
select(
matchtable.c.title.match("Agile Ruby Programming").label(
"ruby"
),
matchtable.c.title.match("Dive Python").label("python"),
matchtable.c.title,
).order_by(matchtable.c.id)
).fetchall()
eq_(
result,
[
(2.0, 0.0, "Agile Web Development with Ruby On Rails"),
(0.0, 2.0, "Dive Into Python"),
(2.0, 0.0, "Programming Matz's Ruby"),
(0.0, 0.0, "The Definitive Guide to Django"),
(0.0, 1.0, "Python in a Nutshell"),
],
)
def test_or_match(self, connection):
matchtable = self.tables.matchtable
results1 = connection.execute(
matchtable.select()
.where(
or_(
matchtable.c.title.match("nutshell"),
matchtable.c.title.match("ruby"),
)
)
.order_by(matchtable.c.id)
).fetchall()
eq_([1, 3, 5], [r.id for r in results1])
results2 = connection.execute(
matchtable.select()
.where(matchtable.c.title.match("nutshell ruby"))
.order_by(matchtable.c.id)
).fetchall()
eq_([1, 3, 5], [r.id for r in results2])
def test_and_match(self, connection):
matchtable = self.tables.matchtable
results1 = connection.execute(
matchtable.select().where(
and_(
matchtable.c.title.match("python"),
matchtable.c.title.match("nutshell"),
)
)
).fetchall()
eq_([5], [r.id for r in results1])
results2 = connection.execute(
matchtable.select().where(
matchtable.c.title.match("+python +nutshell")
)
).fetchall()
eq_([5], [r.id for r in results2])
def test_match_across_joins(self, connection):
matchtable = self.tables.matchtable
cattable = self.tables.cattable
results = connection.execute(
matchtable.select()
.where(
and_(
cattable.c.id == matchtable.c.category_id,
or_(
cattable.c.description.match("Ruby"),
matchtable.c.title.match("nutshell"),
),
)
)
.order_by(matchtable.c.id)
).fetchall()
eq_([1, 3, 5], [r.id for r in results])
class AnyAllTest(fixtures.TablesTest):
__only_on__ = "mysql", "mariadb"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"stuff",
metadata,
Column("id", Integer, primary_key=True),
Column("value", Integer),
)
@classmethod
def insert_data(cls, connection):
stuff = cls.tables.stuff
connection.execute(
stuff.insert(),
[
{"id": 1, "value": 1},
{"id": 2, "value": 2},
{"id": 3, "value": 3},
{"id": 4, "value": 4},
{"id": 5, "value": 5},
],
)
def test_any_w_comparator(self, connection):
stuff = self.tables.stuff
stmt = select(stuff.c.id).where(
stuff.c.value > any_(select(stuff.c.value).scalar_subquery())
)
eq_(connection.execute(stmt).fetchall(), [(2,), (3,), (4,), (5,)])
def test_all_w_comparator(self, connection):
stuff = self.tables.stuff
stmt = select(stuff.c.id).where(
stuff.c.value >= all_(select(stuff.c.value).scalar_subquery())
)
eq_(connection.execute(stmt).fetchall(), [(5,)])
def test_any_literal(self, connection):
stuff = self.tables.stuff
stmt = select(4 == any_(select(stuff.c.value).scalar_subquery()))
is_(connection.execute(stmt).scalar(), True)
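# Hedged illustration (not part of the test suite): how the match() construct
# exercised above is expected to render for the MySQL dialect; the exact
# parameter marker depends on the DBAPI in use.
if __name__ == '__main__':
    from sqlalchemy import MetaData
    from sqlalchemy.dialects import mysql
    demo_metadata = MetaData()
    demo_table = Table(
        "matchtable", demo_metadata,
        Column("id", Integer, primary_key=True),
        Column("title", String(200)),
    )
    print(demo_table.c.title.match("python").compile(dialect=mysql.dialect()))
    # roughly: MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)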
|
|
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for rights for various user actions."""
from __future__ import annotations
from core import feconf
from core import utils
from core.constants import constants
from core.domain import change_domain
from typing import List, Optional
from typing_extensions import TypedDict
from core.domain import user_services # pylint: disable=invalid-import-from # isort:skip
# TODO(#14537): Refactor this file and remove imports marked
# with 'invalid-import-from'.
# IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve
# backward-compatibility with previous exploration snapshots in the datastore.
# Do not modify the definitions of CMD keys that already exist.
CMD_CREATE_NEW = feconf.CMD_CREATE_NEW
CMD_CHANGE_ROLE = feconf.CMD_CHANGE_ROLE
CMD_REMOVE_ROLE = feconf.CMD_REMOVE_ROLE
CMD_CHANGE_EXPLORATION_STATUS = feconf.CMD_CHANGE_EXPLORATION_STATUS
CMD_CHANGE_COLLECTION_STATUS = feconf.CMD_CHANGE_COLLECTION_STATUS
CMD_CHANGE_PRIVATE_VIEWABILITY = feconf.CMD_CHANGE_PRIVATE_VIEWABILITY
CMD_RELEASE_OWNERSHIP = feconf.CMD_RELEASE_OWNERSHIP
CMD_UPDATE_FIRST_PUBLISHED_MSEC = feconf.CMD_UPDATE_FIRST_PUBLISHED_MSEC
ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE
ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC
ROLE_OWNER = feconf.ROLE_OWNER
ROLE_EDITOR = feconf.ROLE_EDITOR
ROLE_VOICE_ARTIST = feconf.ROLE_VOICE_ARTIST
ROLE_VIEWER = feconf.ROLE_VIEWER
ROLE_NONE = feconf.ROLE_NONE
ASSIGN_ROLE_COMMIT_MESSAGE_TEMPLATE = 'Changed role of %s from %s to %s'
ASSIGN_ROLE_COMMIT_MESSAGE_REGEX = '^Changed role of (.*) from (.*) to (.*)$'
DEASSIGN_ROLE_COMMIT_MESSAGE_TEMPLATE = 'Remove %s from role %s'
DEASSIGN_ROLE_COMMIT_MESSAGE_REGEX = '^Remove (.*) from role (.*)$'
class ActivityRightsDict(TypedDict):
"""A dict version of ActivityRights suitable for use by the frontend."""
cloned_from: Optional[str]
status: str
community_owned: bool
owner_names: List[str]
editor_names: List[str]
voice_artist_names: List[str]
viewer_names: List[str]
viewable_if_private: bool
class ActivityRights:
"""Domain object for the rights/publication status of an activity (an
exploration or a collection).
"""
def __init__(
self,
exploration_id: str,
owner_ids: List[str],
editor_ids: List[str],
voice_artist_ids: List[str],
viewer_ids: List[str],
community_owned: bool = False,
cloned_from: Optional[str] = None,
status: str = ACTIVITY_STATUS_PRIVATE,
viewable_if_private: bool = False,
        first_published_msec: Optional[float] = None
) -> None:
self.id = exploration_id
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.voice_artist_ids = voice_artist_ids
self.viewer_ids = viewer_ids
self.community_owned = community_owned
self.cloned_from = cloned_from
self.status = status
self.viewable_if_private = viewable_if_private
self.first_published_msec = first_published_msec
def validate(self) -> None:
"""Validates an ActivityRights object.
Raises:
utils.ValidationError. If any of the owners, editors, voice artists
and viewers lists overlap, or if a community-owned exploration
has owners, editors, voice artists or viewers specified.
"""
if self.community_owned:
if (self.owner_ids or self.editor_ids or self.voice_artist_ids or
self.viewer_ids):
raise utils.ValidationError(
'Community-owned explorations should have no owners, '
'editors, voice artists or viewers specified.')
if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE:
raise utils.ValidationError(
'Community-owned explorations cannot be private.')
if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids:
raise utils.ValidationError(
'Public explorations should have no viewers specified.')
owner_editor = set(self.owner_ids) & set(self.editor_ids)
owner_voice_artist = set(self.owner_ids) & set(self.voice_artist_ids)
owner_viewer = set(self.owner_ids) & set(self.viewer_ids)
editor_voice_artist = set(self.editor_ids) & set(self.voice_artist_ids)
editor_viewer = set(self.editor_ids) & set(self.viewer_ids)
voice_artist_viewer = set(self.voice_artist_ids) & set(self.viewer_ids)
if owner_editor:
raise utils.ValidationError(
'A user cannot be both an owner and an editor: %s' %
owner_editor)
if owner_voice_artist:
raise utils.ValidationError(
'A user cannot be both an owner and a voice artist: %s' %
owner_voice_artist)
if owner_viewer:
raise utils.ValidationError(
'A user cannot be both an owner and a viewer: %s' %
owner_viewer)
if editor_voice_artist:
raise utils.ValidationError(
'A user cannot be both an editor and a voice artist: %s' %
editor_voice_artist)
if editor_viewer:
raise utils.ValidationError(
'A user cannot be both an editor and a viewer: %s' %
editor_viewer)
if voice_artist_viewer:
raise utils.ValidationError(
'A user cannot be both a voice artist and a viewer: %s' %
voice_artist_viewer)
if not self.community_owned and len(self.owner_ids) == 0:
raise utils.ValidationError(
                'Activity should have at least one owner.')
def to_dict(self) -> ActivityRightsDict:
"""Returns a dict suitable for use by the frontend.
Returns:
dict. A dict version of ActivityRights suitable for use by the
frontend.
"""
if self.community_owned:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': True,
'owner_names': [],
'editor_names': [],
'voice_artist_names': [],
'viewer_names': [],
'viewable_if_private': self.viewable_if_private,
}
else:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': False,
'owner_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call]
self.owner_ids),
'editor_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call]
self.editor_ids),
'voice_artist_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call]
self.voice_artist_ids),
'viewer_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call]
self.viewer_ids),
'viewable_if_private': self.viewable_if_private,
}
def is_owner(self, user_id: str) -> bool:
"""Checks whether given user is owner of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity owner.
"""
return bool(user_id in self.owner_ids)
def is_editor(self, user_id: str) -> bool:
"""Checks whether given user is editor of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity editor.
"""
return bool(user_id in self.editor_ids)
def is_voice_artist(self, user_id: str) -> bool:
"""Checks whether given user is voice artist of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity voice artist.
"""
return bool(user_id in self.voice_artist_ids)
def is_viewer(self, user_id: str) -> bool:
"""Checks whether given user is viewer of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity viewer.
"""
return bool(user_id in self.viewer_ids)
def is_published(self) -> bool:
"""Checks whether activity is published.
Returns:
bool. Whether activity is published.
"""
return bool(self.status == ACTIVITY_STATUS_PUBLIC)
def is_private(self) -> bool:
"""Checks whether activity is private.
Returns:
bool. Whether activity is private.
"""
return bool(self.status == ACTIVITY_STATUS_PRIVATE)
def is_solely_owned_by_user(self, user_id: str) -> bool:
"""Checks whether the activity is solely owned by the user.
Args:
user_id: str. The id of the user.
Returns:
bool. Whether the activity is solely owned by the user.
"""
return user_id in self.owner_ids and len(self.owner_ids) == 1
def assign_new_role(self, user_id: str, new_role: str) -> str:
"""Assigns new role to user and removes previous role if present.
Args:
user_id: str. The ID of the user.
new_role: str. The role of the user.
Returns:
str. The previous role of the user.
Raises:
Exception. If previous role is assigned again.
"""
old_role = ROLE_NONE
if new_role == ROLE_VIEWER:
if self.status != ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Public explorations can be viewed by anyone.')
for role, user_ids in zip(
[ROLE_OWNER, ROLE_EDITOR, ROLE_VIEWER, ROLE_VOICE_ARTIST],
[self.owner_ids, self.editor_ids, self.viewer_ids,
self.voice_artist_ids]):
if user_id in user_ids:
user_ids.remove(user_id)
old_role = role
if new_role == role and old_role != new_role:
user_ids.append(user_id)
if old_role == new_role:
if old_role == ROLE_OWNER:
raise Exception(
'This user already owns this exploration.')
if old_role == ROLE_EDITOR:
raise Exception(
'This user already can edit this exploration.')
if old_role == ROLE_VOICE_ARTIST:
raise Exception(
'This user already can voiceover this exploration.')
if old_role == ROLE_VIEWER:
raise Exception(
'This user already can view this exploration.')
return old_role
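# A minimal usage sketch (illustrative only; the exploration and user IDs below
# are hypothetical):
#
#     rights = ActivityRights(
#         'exp_1', owner_ids=['owner_a'], editor_ids=[],
#         voice_artist_ids=[], viewer_ids=[])
#     rights.validate()
#     previous_role = rights.assign_new_role('user_b', ROLE_EDITOR)
#     # previous_role == ROLE_NONE, since 'user_b' had no prior role.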
class ExplorationRightsChange(change_domain.BaseChange):
"""Domain object class for an exploration rights change.
The allowed commands, together with the attributes:
- 'create_new'
- 'change_role' (with assignee_id, old_role, new_role)
- 'change_exploration_status' (with old_status, new_status)
- 'change_private_viewability' (with
old_viewable_if_private, new_viewable_if_private)
- 'release_ownership'
- 'update_first_published_msec' (with
old_first_published_msec, new_first_published_msec)
A role must be one of the ALLOWED_ROLES.
A status must be one of the ALLOWED_STATUS.
"""
ALLOWED_COMMANDS = feconf.EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS
class CollectionRightsChange(change_domain.BaseChange):
"""Domain object class for an collection rights change.
The allowed commands, together with the attributes:
- 'create_new'
- 'change_role' (with assignee_id, old_role, new_role)
- 'change_collection_status' (with old_status, new_status)
- 'change_private_viewability' (with
old_viewable_if_private, new_viewable_if_private)
- 'release_ownership'
- 'update_first_published_msec' (with
old_first_published_msec, new_first_published_msec)
A role must be one of the ALLOWED_ROLES.
A status must be one of the ALLOWED_STATUS.
"""
ALLOWED_COMMANDS = feconf.COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from datetime import datetime
from unittest import TestCase
from mock import patch, Mock
from click.testing import CliRunner
from statuspage import cli, update, create, iter_systems, get_severity, SYSTEM_LABEL_COLOR
from github import UnknownObjectException
import codecs
class CLITestCase(TestCase):
def setUp(self):
self.patcher = patch('statuspage.Github')
self.gh = self.patcher.start()
# setup mocked label
self.label = Mock()
self.label.color = "171717"
self.label.name = "Website"
self.label1 = Mock()
self.label1.color = "171717"
self.label1.name = "API"
self.gh().get_user().get_repo().get_labels.return_value = [self.label, self.label1]
# set up mocked issue
self.issue = Mock()
self.issue.created_at = datetime.now()
self.issue.state = "open"
self.issue_label = Mock()
self.issue_label.color = "FF4D4D"
self.issue_label.name = "major outage"
self.issue.get_labels.return_value = [self.issue_label, self.label]
self.issue.user.login = "some-dude"
self.comment = Mock()
self.comment.user.login = "some-dude"
self.issue.get_comments.return_value = [self.comment, ]
self.issue1 = Mock()
self.issue1.created_at = datetime.now()
self.issue1.state = "open"
self.issue1.user.login = "some-dude"
self.issue1.get_labels.return_value = [self.issue_label, self.label1]
self.issue1.get_comments.return_value = [self.comment, ]
self.gh().get_user().get_repo().get_issues.return_value = [self.issue, self.issue1]
self.template = Mock()
self.template.decoded_content = b"some foo"
self.template.content = codecs.encode(b"some other foo", "base64")
self.gh().get_user().get_repo().get_file_contents.return_value = self.template
self.gh().get_organization().get_repo().get_file_contents.return_value = self.template
self.collaborator = Mock()
self.collaborator.login = "some-dude"
self.gh().get_user().get_repo().get_collaborators.return_value = [self.collaborator,]
self.gh().get_organization().get_repo().get_collaborators.return_value = [self.collaborator,]
def tearDown(self):
self.patcher.stop()
@patch("statuspage.run_update")
def test_create(self, run_update):
label = Mock()
self.gh().get_user().create_repo().get_labels.return_value = [label,]
runner = CliRunner()
result = runner.invoke(
create,
["--name", "testrepo", "--token", "token", "--systems", "sys1,sys2"]
)
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
@patch("statuspage.run_update")
def test_create_org(self, run_update):
runner = CliRunner()
result = runner.invoke(
create,
["--name", "testrepo",
"--token", "token",
"--systems", "sys1,sys2",
"--org", "some"]
)
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_organization.assert_called_with("some")
def test_update(self):
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
def test_dont_update_when_nothing_changes(self):
runner = CliRunner()
self.template.content = codecs.encode(b"some foo", "base64")
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
self.gh().get_user().get_repo().update_file.assert_not_called()
def test_update_org(self):
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token", "--org", "some"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_organization().get_repo.assert_called_with(name="testrepo")
self.gh().get_organization().get_repo().get_labels.assert_called_once_with()
def test_update_index_does_not_exist(self):
self.gh().get_user().get_repo().update_file.side_effect = UnknownObjectException(status=404, data="foo")
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
self.gh().get_user().get_repo().create_file.assert_called_once_with(
branch='gh-pages',
content='some foo',
message='initial',
path='/index.html'
)
def test_update_non_labeled_issue_not_displayed(self):
self.issue.get_labels.return_value = []
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
# make sure that get_comments is not called for the first issue but for the second
self.issue.get_comments.assert_not_called()
self.issue1.get_comments.assert_called_once_with()
def test_update_non_collaborator_issue_not_displayed(self):
self.issue.user.login = "some-other-dude"
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
# make sure that get_comments is not called for the first issue but for the second
self.issue.get_comments.assert_not_called()
self.issue1.get_comments.assert_called_once_with()
class UtilTestCase(TestCase):
def test_iter_systems(self):
label1 = Mock()
label2 = Mock()
label1.name = "website"
label1.color = SYSTEM_LABEL_COLOR
self.assertEqual(
list(iter_systems([label1, label2])),
["website", ]
)
self.assertEqual(
list(iter_systems([label2])),
[]
)
def test_severity(self):
label1 = Mock()
label2 = Mock()
label1.color = "FF4D4D"
self.assertEqual(
get_severity([label1, label2]),
"major outage"
)
label1.color = "000000"
self.assertEqual(
get_severity([label1, label2]),
None
)
if __name__ == '__main__':
unittest.main()
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
from typing import Callable, Union
from pyflink.common import typeinfo, ExecutionConfig
from pyflink.common.typeinfo import RowTypeInfo, PickledBytesTypeInfo, Types
from pyflink.common.typeinfo import TypeInformation
from pyflink.datastream.functions import _get_python_env, FlatMapFunctionWrapper, FlatMapFunction, \
MapFunction, MapFunctionWrapper, Function, FunctionWrapper, SinkFunction, FilterFunction, \
FilterFunctionWrapper, KeySelectorFunctionWrapper, KeySelector, ReduceFunction, \
ReduceFunctionWrapper, CoMapFunction, CoFlatMapFunction, Partitioner, \
PartitionerFunctionWrapper
from pyflink.java_gateway import get_gateway
class DataStream(object):
"""
A DataStream represents a stream of elements of the same type. A DataStream can be transformed
into another DataStream by applying a transformation as for example:
::
>>> DataStream.map(MapFunctionImpl())
>>> DataStream.filter(FilterFunctionImpl())
"""
def __init__(self, j_data_stream):
self._j_data_stream = j_data_stream
def get_name(self) -> str:
"""
Gets the name of the current data stream. This name is used by the visualization and logging
during runtime.
:return: Name of the stream.
"""
return self._j_data_stream.getName()
def name(self, name: str):
"""
Sets the name of the current data stream. This name is used by the visualization and logging
during runtime.
:param name: Name of the stream.
:return: The named operator.
"""
self._j_data_stream.name(name)
return self
def uid(self, uid: str):
"""
Sets an ID for this operator. The specified ID is used to assign the same operator ID across
job submissions (for example when starting a job from a savepoint).
Important: this ID needs to be unique per transformation and job. Otherwise, job submission
will fail.
:param uid: The unique user-specified ID of this transformation.
:return: The operator with the specified ID.
"""
self._j_data_stream.uid(uid)
return self
def set_uid_hash(self, uid_hash: str):
"""
Sets a user-provided hash for this operator. This will be used as is to create the
JobVertexID. The user-provided hash is an alternative to the generated hashes, and is
used when identification of the operator through the default hash mechanics fails (e.g.
because of changes between Flink versions).
Important: this should be used as a workaround or for trouble shooting. The provided hash
needs to be unique per transformation and job. Otherwise, job submission will fail.
Furthermore, you cannot assign user-specified hash to intermediate nodes in an operator
chain and trying so will let your job fail.
A use case for this is in migration between Flink versions or changing the jobs in a way
that changes the automatically generated hashes. In this case, providing the previous hashes
directly through this method (e.g. obtained from old logs) can help to reestablish a lost
mapping from states to their target operator.
:param uid_hash: The user provided hash for this operator. This will become the jobVertexID,
which is shown in the logs and web ui.
:return: The operator with the user provided hash.
"""
self._j_data_stream.setUidHash(uid_hash)
return self
def set_parallelism(self, parallelism: int):
"""
Sets the parallelism for this operator.
:param parallelism: The parallelism for this operator.
:return: The operator with set parallelism.
"""
self._j_data_stream.setParallelism(parallelism)
return self
def set_max_parallelism(self, max_parallelism: int):
"""
Sets the maximum parallelism of this operator.
The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
number of key groups used for partitioned state.
:param max_parallelism: Maximum parallelism.
:return: The operator with set maximum parallelism.
"""
self._j_data_stream.setMaxParallelism(max_parallelism)
return self
def get_type(self) -> TypeInformation:
"""
Gets the type of the stream.
:return: The type of the DataStream.
"""
return typeinfo._from_java_type(self._j_data_stream.getType())
def get_execution_environment(self):
"""
Returns the StreamExecutionEnvironment that was used to create this DataStream.
:return: The Execution Environment.
"""
from pyflink.datastream import StreamExecutionEnvironment
return StreamExecutionEnvironment(
j_stream_execution_environment=self._j_data_stream.getExecutionEnvironment())
def get_execution_config(self) -> ExecutionConfig:
return ExecutionConfig(j_execution_config=self._j_data_stream.getExecutionConfig())
def force_non_parallel(self):
"""
Sets the parallelism and maximum parallelism of this operator to one, and marks this
operator so that its parallelism can no longer be changed to a value other than one.
:return: The operator with only one parallelism.
"""
self._j_data_stream.forceNonParallel()
return self
def set_buffer_timeout(self, timeout_millis: int):
"""
Sets the buffering timeout for data produced by this operation. The timeout defines how long
data may linger in a partially full buffer before being sent over the network.
Lower timeouts lead to lower tail latencies, but may affect throughput. Timeouts of 1 ms
still sustain high throughput, even for jobs with high parallelism.
A value of '-1' means that the default buffer timeout should be used. A value of '0'
indicates that no buffering should happen, and all records/events should be immediately sent
through the network, without additional buffering.
:param timeout_millis: The maximum time between two output flushes.
:return: The operator with buffer timeout set.
"""
self._j_data_stream.setBufferTimeout(timeout_millis)
return self
def start_new_chain(self) -> 'DataStream':
"""
Starts a new task chain beginning at this operator. This operator will not be chained (thread
co-located for increased performance) to any previous tasks, even if possible.
:return: The operator with chaining set.
"""
self._j_data_stream.startNewChain()
return self
def disable_chaining(self) -> 'DataStream':
"""
Turns off chaining for this operator so thread co-location will not be used as an
optimization.
Chaining can be turned off for the whole job by
StreamExecutionEnvironment.disableOperatorChaining(), however this is not advised for
performance reasons.
:return: The operator with chaining disabled.
"""
self._j_data_stream.disableChaining()
return self
def slot_sharing_group(self, slot_sharing_group: str) -> 'DataStream':
"""
Sets the slot sharing group of this operation. Parallel instances of operations that are in
the same slot sharing group will be co-located in the same TaskManager slot, if possible.
Operations inherit the slot sharing group of input operations if all input operations are in
the same slot sharing group and no slot sharing group was explicitly specified.
Initially an operation is in the default slot sharing group. An operation can be put into
the default group explicitly by setting the slot sharing group to 'default'.
:param slot_sharing_group: The slot sharing group name.
:return: This operator.
"""
self._j_data_stream.slotSharingGroup(slot_sharing_group)
return self
def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a Map transformation on a DataStream. The transformation calls a MapFunction for
each element of the DataStream. Each MapFunction call returns exactly one element. The user
can also extend RichMapFunction to gain access to other features provided by the
RichFunction interface.
Note that if the user does not specify the output data type, the output data will be
serialized as a pickled primitive byte array.
:param func: The MapFunction that is called for each element of the DataStream.
:param output_type: The type information of the MapFunction output data.
:return: The transformed DataStream.
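Example (a minimal sketch; ``env`` is assumed to be an existing StreamExecutionEnvironment):
::
>>> ds = env.from_collection([1, 2, 3])
>>> doubled = ds.map(lambda x: x * 2, output_type=Types.INT())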
"""
if not isinstance(func, MapFunction):
if callable(func):
func = MapFunctionWrapper(func)
else:
raise TypeError("The input must be a MapFunction or a callable function")
func_name = str(func)
from pyflink.fn_execution import flink_fn_execution_pb2
j_python_data_stream_scalar_function_operator, j_output_type_info = \
self._get_java_python_function_operator(func,
output_type,
func_name,
flink_fn_execution_pb2
.UserDefinedDataStreamFunction.MAP)
return DataStream(self._j_data_stream.transform(
"Map",
j_output_type_info,
j_python_data_stream_scalar_function_operator
))
def flat_map(self, func: Union[Callable, FlatMapFunction],
result_type: TypeInformation = None) -> 'DataStream':
"""
Applies a FlatMap transformation on a DataStream. The transformation calls a FlatMapFunction
for each element of the DataStream. Each FlatMapFunction call can return any number of
elements including none. The user can also extend RichFlatMapFunction to gain access to
other features provided by the RichFunction interface.
:param func: The FlatMapFunction that is called for each element of the DataStream.
:param result_type: The type information of output data.
:return: The transformed DataStream.
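Example (a minimal sketch; ``env`` is assumed to be an existing StreamExecutionEnvironment):
::
>>> ds = env.from_collection(['hello flink', 'hello world'])
>>> words = ds.flat_map(lambda line: line.split(' '), result_type=Types.STRING())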
"""
if not isinstance(func, FlatMapFunction):
if callable(func):
func = FlatMapFunctionWrapper(func)
else:
raise TypeError("The input must be a FlatMapFunction or a callable function")
func_name = str(func)
from pyflink.fn_execution import flink_fn_execution_pb2
j_python_data_stream_scalar_function_operator, j_output_type_info = \
self._get_java_python_function_operator(func,
result_type,
func_name,
flink_fn_execution_pb2
.UserDefinedDataStreamFunction.FLAT_MAP)
return DataStream(self._j_data_stream.transform(
"FLAT_MAP",
j_output_type_info,
j_python_data_stream_scalar_function_operator
))
def key_by(self, key_selector: Union[Callable, KeySelector],
key_type_info: TypeInformation = None) -> 'KeyedStream':
"""
Creates a new KeyedStream that uses the provided key for partitioning its operator states.
:param key_selector: The KeySelector to be used for extracting the key for partitioning.
:param key_type_info: The type information describing the key type.
:return: The DataStream with partitioned state(i.e. KeyedStream).
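Example (a minimal sketch; ``env`` is assumed to be an existing StreamExecutionEnvironment):
::
>>> ds = env.from_collection([(1, 'a'), (2, 'b'), (1, 'c')])
>>> keyed = ds.key_by(lambda x: x[0], key_type_info=Types.INT())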
"""
if callable(key_selector):
key_selector = KeySelectorFunctionWrapper(key_selector)
if not isinstance(key_selector, (KeySelector, KeySelectorFunctionWrapper)):
raise TypeError("Parameter key_selector should be a type of KeySelector.")
gateway = get_gateway()
PickledKeySelector = gateway.jvm \
.org.apache.flink.datastream.runtime.functions.python.PickledKeySelector
j_output_type_info = self._j_data_stream.getTransformation().getOutputType()
output_type_info = typeinfo._from_java_type(j_output_type_info)
is_key_pickled_byte_array = False
if key_type_info is None:
key_type_info = Types.PICKLED_BYTE_ARRAY()
is_key_pickled_byte_array = True
intermediate_map_stream = self.map(lambda x: (key_selector.get_key(x), x),
output_type=Types.ROW([key_type_info, output_type_info]))
intermediate_map_stream.name(gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
.STREAM_KEY_BY_MAP_OPERATOR_NAME)
generated_key_stream = KeyedStream(intermediate_map_stream._j_data_stream
.keyBy(PickledKeySelector(is_key_pickled_byte_array),
key_type_info.get_java_type_info()), self)
generated_key_stream._original_data_type_info = output_type_info
return generated_key_stream
def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
"""
Applies a Filter transformation on a DataStream. The transformation calls a FilterFunction
for each element of the DataStream and retains only those elements for which the function
returns true. Elements for which the function returns false are filtered out. The user can also
extend RichFilterFunction to gain access to other features provided by the RichFunction
interface.
:param func: The FilterFunction that is called for each element of the DataStream.
:return: The filtered DataStream.
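Example (a minimal sketch; ``ds`` is assumed to be an existing DataStream of numbers):
::
>>> evens = ds.filter(lambda x: x % 2 == 0)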
"""
class FilterFlatMap(FlatMapFunction):
def __init__(self, filter_func):
self._func = filter_func
def flat_map(self, value):
if self._func.filter(value):
yield value
if isinstance(func, Callable):
func = FilterFunctionWrapper(func)
elif not isinstance(func, FilterFunction):
raise TypeError("func must be a Callable or instance of FilterFunction.")
j_input_type = self._j_data_stream.getTransformation().getOutputType()
type_info = typeinfo._from_java_type(j_input_type)
j_data_stream = self.flat_map(FilterFlatMap(func), result_type=type_info)._j_data_stream
filtered_stream = DataStream(j_data_stream)
filtered_stream.name("Filter")
return filtered_stream
def union(self, *streams) -> 'DataStream':
"""
Creates a new DataStream by merging DataStream outputs of the same type with each other. The
DataStreams merged using this operator will be transformed simultaneously.
:param streams: The DataStreams to union output with.
:return: The DataStream.
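Example (a minimal sketch; ``ds1``, ``ds2`` and ``ds3`` are assumed to be existing
DataStreams of the same type):
::
>>> merged = ds1.union(ds2, ds3)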
"""
j_data_streams = []
for data_stream in streams:
j_data_streams.append(data_stream._j_data_stream)
gateway = get_gateway()
j_data_stream_class = gateway.jvm.org.apache.flink.streaming.api.datastream.DataStream
j_data_stream_arr = get_gateway().new_array(j_data_stream_class, len(j_data_streams))
for i in range(len(j_data_streams)):
j_data_stream_arr[i] = j_data_streams[i]
j_united_stream = self._j_data_stream.union(j_data_stream_arr)
return DataStream(j_data_stream=j_united_stream)
def connect(self, ds: 'DataStream') -> 'ConnectedStreams':
"""
Creates a new 'ConnectedStreams' by connecting 'DataStream' outputs of (possible)
different types with each other. The DataStreams connected using this operator can
be used with CoFunctions to apply joint transformations.
:param ds: The DataStream with which this stream will be connected.
:return: The `ConnectedStreams`.
"""
return ConnectedStreams(self, ds)
def shuffle(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are shuffled uniformly
randomly to the next operation.
:return: The DataStream with shuffle partitioning set.
"""
return DataStream(self._j_data_stream.shuffle())
def project(self, *field_indexes) -> 'DataStream':
"""
Initiates a Project transformation on a Tuple DataStream.
Note that only Tuple DataStreams can be projected.
:param field_indexes: The field indexes of the input tuples that are retained. The order of
fields in the output tuple corresponds to the order of field indexes.
:return: The projected DataStream.
"""
if not isinstance(self.get_type(), typeinfo.TupleTypeInfo):
raise Exception('Only Tuple DataStreams can be projected.')
gateway = get_gateway()
j_index_arr = gateway.new_array(gateway.jvm.int, len(field_indexes))
for i in range(len(field_indexes)):
j_index_arr[i] = field_indexes[i]
return DataStream(self._j_data_stream.project(j_index_arr))
def rescale(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are distributed evenly
to a subset of instances of the next operation in a round-robin fashion.
The subset of downstream operations to which the upstream operation sends elements depends
on the degree of parallelism of both the upstream and downstream operation. For example, if
the upstream operation has parallelism 2 and the downstream operation has parallelism 4,
then one upstream operation would distribute elements to two downstream operations. If, on
the other hand, the downstream operation has parallelism 2 and the upstream operation has
parallelism 4, then two upstream operations will distribute to one downstream operation while
the other two upstream operations will distribute to the other downstream operations.
In cases where the different parallelisms are not multiples of each other, one or several
downstream operations will have a differing number of inputs from upstream operations.
:return: The DataStream with rescale partitioning set.
"""
return DataStream(self._j_data_stream.rescale())
def rebalance(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are distributed evenly
to instances of the next operation in a round-robin fashion.
:return: The DataStream with rebalance partition set.
"""
return DataStream(self._j_data_stream.rebalance())
def forward(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are forwarded to the
local sub-task of the next operation.
:return: The DataStream with forward partitioning set.
"""
return DataStream(self._j_data_stream.forward())
def broadcast(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are broadcasted to every
parallel instance of the next operation.
:return: The DataStream with broadcast partitioning set.
"""
return DataStream(self._j_data_stream.broadcast())
def partition_custom(self, partitioner: Union[Callable, Partitioner],
key_selector: Union[Callable, KeySelector]) -> 'DataStream':
"""
Partitions a DataStream on the key returned by the selector, using a custom partitioner.
This method takes the key selector to get the key to partition on, and a partitioner that
accepts the key type.
Note that this method works only on single field keys, i.e. the selector cannot return
tuples of fields.
:param partitioner: The partitioner to assign partitions to keys.
:param key_selector: The KeySelector with which the DataStream is partitioned.
:return: The partitioned DataStream.
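Example (a minimal sketch; ``ds`` is assumed to be an existing DataStream of
(key, value) tuples):
::
>>> partitioned = ds.partition_custom(
...     lambda key, num_partitions: key % num_partitions,
...     lambda record: record[0])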
"""
if callable(key_selector):
key_selector = KeySelectorFunctionWrapper(key_selector)
if not isinstance(key_selector, (KeySelector, KeySelectorFunctionWrapper)):
raise TypeError("Parameter key_selector should be a type of KeySelector.")
if callable(partitioner):
partitioner = PartitionerFunctionWrapper(partitioner)
if not isinstance(partitioner, (Partitioner, PartitionerFunctionWrapper)):
raise TypeError("Parameter partitioner should be a type of Partitioner.")
gateway = get_gateway()
data_stream_num_partitions_env_key = gateway.jvm\
.org.apache.flink.datastream.runtime.operators.python\
.DataStreamPythonPartitionCustomFunctionOperator.DATA_STREAM_NUM_PARTITIONS
class PartitionCustomMapFunction(MapFunction):
"""
A wrapper class for partition_custom map function. It indicates that it is a partition
custom operation that we need to apply DataStreamPythonPartitionCustomFunctionOperator
to run the map function.
"""
def __init__(self):
self.num_partitions = None
def map(self, value):
return self.partition_custom_map(value)
def partition_custom_map(self, value):
if self.num_partitions is None:
self.num_partitions = int(os.environ[data_stream_num_partitions_env_key])
partition = partitioner.partition(key_selector.get_key(value), self.num_partitions)
return partition, value
def __repr__(self) -> str:
return '_Flink_PartitionCustomMapFunction'
original_type_info = self.get_type()
intermediate_map_stream = self.map(PartitionCustomMapFunction(),
output_type=Types.ROW([Types.INT(), original_type_info]))
intermediate_map_stream.name(
gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
.STREAM_PARTITION_CUSTOM_MAP_OPERATOR_NAME)
JPartitionCustomKeySelector = gateway.jvm\
.org.apache.flink.datastream.runtime.functions.python.PartitionCustomKeySelector
JIdParitioner = gateway.jvm\
.org.apache.flink.api.java.functions.IdPartitioner
intermediate_map_stream = DataStream(intermediate_map_stream._j_data_stream
.partitionCustom(JIdParitioner(),
JPartitionCustomKeySelector()))
values_map_stream = intermediate_map_stream.map(lambda x: x[1], original_type_info)
values_map_stream.name(gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
.KEYED_STREAM_VALUE_OPERATOR_NAME)
return DataStream(values_map_stream._j_data_stream)
def _get_java_python_function_operator(self, func: Union[Function, FunctionWrapper],
type_info: TypeInformation, func_name: str,
func_type: int):
"""
Create a flink operator according to user provided function object, data types,
function name and function type.
:param func: a function object that implements the Function interface.
:param type_info: the data type of the function output data.
:param func_name: function name.
:param func_type: function type, supports MAP, FLAT_MAP, etc.
:return: A flink java operator which is responsible for executing the user-defined python
function.
"""
gateway = get_gateway()
import cloudpickle
serialized_func = cloudpickle.dumps(func)
j_input_types = self._j_data_stream.getTransformation().getOutputType()
if type_info is None:
output_type_info = PickledBytesTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO()
else:
if isinstance(type_info, list):
output_type_info = RowTypeInfo(type_info)
else:
output_type_info = type_info
DataStreamPythonFunction = gateway.jvm.org.apache.flink.datastream.runtime.functions \
.python.DataStreamPythonFunction
j_python_data_stream_scalar_function = DataStreamPythonFunction(
func_name,
bytearray(serialized_func),
_get_python_env())
DataStreamPythonFunctionInfo = gateway.jvm. \
org.apache.flink.datastream.runtime.functions.python \
.DataStreamPythonFunctionInfo
j_python_data_stream_function_info = DataStreamPythonFunctionInfo(
j_python_data_stream_scalar_function,
func_type)
j_conf = gateway.jvm.org.apache.flink.configuration.Configuration()
# set max bundle size to 1 to force synchronize process for reduce function.
from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
if func_type == UserDefinedDataStreamFunction.REDUCE:
j_conf.setInteger(gateway.jvm.org.apache.flink.python.PythonOptions.MAX_BUNDLE_SIZE, 1)
DataStreamPythonReduceFunctionOperator = gateway.jvm.org.apache.flink.datastream \
.runtime.operators.python.DataStreamPythonReduceFunctionOperator
j_output_type_info = j_input_types.getTypeAt(1)
j_python_data_stream_function_operator = DataStreamPythonReduceFunctionOperator(
j_conf,
j_input_types,
j_output_type_info,
j_python_data_stream_function_info)
return j_python_data_stream_function_operator, j_output_type_info
else:
if str(func) == '_Flink_PartitionCustomMapFunction':
DataStreamPythonFunctionOperator = gateway.jvm.org.apache.flink.datastream.runtime \
.operators.python.DataStreamPythonPartitionCustomFunctionOperator
else:
DataStreamPythonFunctionOperator = gateway.jvm.org.apache.flink.datastream.runtime \
.operators.python.DataStreamPythonStatelessFunctionOperator
j_python_data_stream_function_operator = DataStreamPythonFunctionOperator(
j_conf,
j_input_types,
output_type_info.get_java_type_info(),
j_python_data_stream_function_info)
return j_python_data_stream_function_operator, output_type_info.get_java_type_info()
def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
"""
Adds the given sink to this DataStream. Only streams with sinks added will be executed once
the StreamExecutionEnvironment.execute() method is called.
:param sink_func: The SinkFunction object.
:return: The closed DataStream.
"""
return DataStreamSink(self._j_data_stream.addSink(sink_func.get_java_function()))
def print(self, sink_identifier: str = None) -> 'DataStreamSink':
"""
Writes a DataStream to the standard output stream (stdout).
For each element of the DataStream the object string is written.
NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
worker, and is not fault tolerant.
:param sink_identifier: The string to prefix the output with.
:return: The closed DataStream.
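Example (a minimal sketch; ``ds`` is assumed to be an existing DataStream):
::
>>> ds.print('debug')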
"""
if sink_identifier is not None:
j_data_stream_sink = self._align_output_type()._j_data_stream.print(sink_identifier)
else:
j_data_stream_sink = self._align_output_type()._j_data_stream.print()
return DataStreamSink(j_data_stream_sink)
def _align_output_type(self) -> 'DataStream':
"""
Transform the pickled python object into String if the output type is PickledByteArrayInfo.
"""
output_type_info_class = self._j_data_stream.getTransformation().getOutputType().getClass()
if output_type_info_class.isAssignableFrom(
PickledBytesTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO().get_java_type_info()
.getClass()):
def python_obj_to_str_map_func(value):
if not isinstance(value, (str, bytes)):
value = str(value)
return value
transformed_data_stream = DataStream(
self.map(python_obj_to_str_map_func,
output_type=Types.STRING())._j_data_stream)
return transformed_data_stream
else:
return self
class DataStreamSink(object):
"""
A Stream Sink. This is used for emitting elements from a streaming topology.
"""
def __init__(self, j_data_stream_sink):
"""
The constructor of DataStreamSink.
:param j_data_stream_sink: A DataStreamSink java object.
"""
self._j_data_stream_sink = j_data_stream_sink
def name(self, name: str) -> 'DataStreamSink':
"""
Sets the name of this sink. This name is used by the visualization and logging during
runtime.
:param name: The name of this sink.
:return: The named sink.
"""
self._j_data_stream_sink.name(name)
return self
def uid(self, uid: str) -> 'DataStreamSink':
"""
Sets an ID for this operator. The specified ID is used to assign the same operator ID across
job submissions (for example when starting a job from a savepoint).
Important: this ID needs to be unique per transformation and job. Otherwise, job submission
will fail.
:param uid: The unique user-specified ID of this transformation.
:return: The operator with the specified ID.
"""
self._j_data_stream_sink.uid(uid)
return self
def set_uid_hash(self, uid_hash: str) -> 'DataStreamSink':
"""
Sets a user-provided hash for this operator. This will be used as is to create the
JobVertexID. The user-provided hash is an alternative to the generated hashes, and is
used when identification of the operator through the default hash mechanics fails (e.g.
because of changes between Flink versions).
Important: this should be used as a workaround or for trouble shooting. The provided hash
needs to be unique per transformation and job. Otherwise, job submission will fail.
Furthermore, you cannot assign user-specified hash to intermediate nodes in an operator
chain and trying so will let your job fail.
A use case for this is in migration between Flink versions or changing the jobs in a way
that changes the automatically generated hashes. In this case, providing the previous hashes
directly through this method (e.g. obtained from old logs) can help to reestablish a lost
mapping from states to their target operator.
:param uid_hash: The user provided hash for this operator. This will become the jobVertexID,
which is shown in the logs and web ui.
:return: The operator with the user provided hash.
"""
self._j_data_stream_sink.setUidHash(uid_hash)
return self
def set_parallelism(self, parallelism: int) -> 'DataStreamSink':
"""
Sets the parallelism for this operator.
:param parallelism: The parallelism for this operator.
:return: The operator with set parallelism.
"""
self._j_data_stream_sink.setParallelism(parallelism)
return self
def disable_chaining(self) -> 'DataStreamSink':
"""
Turns off chaining for this operator so thread co-location will not be used as an
optimization.
Chaining can be turned off for the whole job by
StreamExecutionEnvironment.disableOperatorChaining(), however this is not advised for
performance reasons.
:return: The operator with chaining disabled.
"""
self._j_data_stream_sink.disableChaining()
return self
def slot_sharing_group(self, slot_sharing_group: str) -> 'DataStreamSink':
"""
Sets the slot sharing group of this operation. Parallel instances of operations that are in
the same slot sharing group will be co-located in the same TaskManager slot, if possible.
Operations inherit the slot sharing group of input operations if all input operations are in
the same slot sharing group and no slot sharing group was explicitly specified.
Initially an operation is in the default slot sharing group. An operation can be put into
the default group explicitly by setting the slot sharing group to 'default'.
:param slot_sharing_group: The slot sharing group name.
:return: This operator.
"""
self._j_data_stream_sink.slotSharingGroup(slot_sharing_group)
return self
class KeyedStream(DataStream):
"""
A KeyedStream represents a DataStream on which operator state is partitioned by key using a
provided KeySelector. Typical operations supported by a DataStream are also possible on a
KeyedStream, with the exception of partitioning methods such as shuffle, forward and keyBy.
Reduce-style operations, such as reduce and sum work on elements that have the same key.
"""
def __init__(self, j_keyed_stream, origin_stream: DataStream):
"""
Constructor of KeyedStream.
:param j_keyed_stream: A java KeyedStream object.
:param origin_stream: The DataStream before key by.
"""
super(KeyedStream, self).__init__(j_data_stream=j_keyed_stream)
self._original_data_type_info = None
self._origin_stream = origin_stream
def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
-> 'DataStream':
return self._values().map(func, output_type)
def flat_map(self, func: Union[Callable, FlatMapFunction], result_type: TypeInformation = None)\
-> 'DataStream':
return self._values().flat_map(func, result_type)
def reduce(self, func: Union[Callable, ReduceFunction]) -> 'DataStream':
"""
Applies a reduce transformation on the grouped data stream, grouped by the given
key position. The `ReduceFunction` will receive input values based on the key value.
Only input values with the same key will go to the same reducer.
Example:
::
>>> ds = env.from_collection([(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')])
>>> ds.key_by(lambda x: x[1]).reduce(lambda a, b: (a[0] + b[0], b[1]))
:param func: The ReduceFunction that is called for each element of the DataStream.
:return: The transformed DataStream.
"""
if not isinstance(func, ReduceFunction):
if callable(func):
func = ReduceFunctionWrapper(func)
else:
raise TypeError("The input must be a ReduceFunction or a callable function!")
from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
func_name = "m_reduce_" + str(func)
j_python_data_stream_scalar_function_operator, j_output_type_info = \
self._get_java_python_function_operator(func,
None,
func_name,
UserDefinedDataStreamFunction.REDUCE)
return DataStream(self._j_data_stream.transform(
"Keyed Reduce",
j_output_type_info,
j_python_data_stream_scalar_function_operator
))
def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
return self._values().filter(func)
def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
return self._values().add_sink(sink_func)
def key_by(self, key_selector: Union[Callable, KeySelector],
key_type_info: TypeInformation = None) -> 'KeyedStream':
return self._origin_stream.key_by(key_selector, key_type_info)
def union(self, *streams) -> 'DataStream':
return self._values().union(*streams)
def shuffle(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def project(self, *field_indexes) -> 'DataStream':
return self._values().project(*field_indexes)
def rescale(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def rebalance(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def forward(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def broadcast(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def partition_custom(self, partitioner: Union[Callable, Partitioner],
key_selector: Union[Callable, KeySelector]) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def print(self, sink_identifier=None):
return self._values().print()
def _values(self) -> 'DataStream':
"""
Since the python KeyedStream is represented as Row(key_value, original_data), this helper
extracts the original_data.
"""
transformed_stream = super().map(lambda x: x[1], output_type=self._original_data_type_info)
transformed_stream.name(get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
.KEYED_STREAM_VALUE_OPERATOR_NAME)
return DataStream(transformed_stream._j_data_stream)
def set_parallelism(self, parallelism: int):
raise Exception("Set parallelism for KeyedStream is not supported.")
def name(self, name: str):
raise Exception("Set name for KeyedStream is not supported.")
def get_name(self) -> str:
raise Exception("Get name of KeyedStream is not supported.")
def uid(self, uid: str):
raise Exception("Set uid for KeyedStream is not supported.")
def set_uid_hash(self, uid_hash: str):
raise Exception("Set uid hash for KeyedStream is not supported.")
def set_max_parallelism(self, max_parallelism: int):
raise Exception("Set max parallelism for KeyedStream is not supported.")
def force_non_parallel(self):
raise Exception("Set force non-parallel for KeyedStream is not supported.")
def set_buffer_timeout(self, timeout_millis: int):
raise Exception("Set buffer timeout for KeyedStream is not supported.")
def start_new_chain(self) -> 'DataStream':
raise Exception("Start new chain for KeyedStream is not supported.")
def disable_chaining(self) -> 'DataStream':
raise Exception("Disable chaining for KeyedStream is not supported.")
def slot_sharing_group(self, slot_sharing_group: str) -> 'DataStream':
raise Exception("Setting slot sharing group for KeyedStream is not supported.")
class ConnectedStreams(object):
"""
ConnectedStreams represent two connected streams of (possibly) different data types.
Connected streams are useful for cases where operations on one stream directly
affect the operations on the other stream, usually via shared state between the streams.
An example for the use of connected streams would be to apply rules that change over time
onto another stream. One of the connected streams has the rules, the other stream the
elements to apply the rules to. The operation on the connected stream maintains the
current set of rules in the state. It may receive either a rule update and update the state
or a data element and apply the rules in the state to the element.
The connected stream can be conceptually viewed as a union stream of an Either type, that
holds either the first stream's type or the second stream's type.
"""
def __init__(self, stream1: DataStream, stream2: DataStream):
self.stream1 = stream1
self.stream2 = stream2
def key_by(self, key_selector1: Union[Callable, KeySelector],
key_selector2: Union[Callable, KeySelector],
key_type_info: TypeInformation = None) -> 'ConnectedStreams':
"""
KeyBy operation for connected data stream. Assigns keys to the elements of
input1 and input2 using keySelector1 and keySelector2 with explicit type information
for the common key type.
:param key_selector1: The `KeySelector` used for grouping the first input.
:param key_selector2: The `KeySelector` used for grouping the second input.
:param key_type_info: The type information of the common key type.
:return: The partitioned `ConnectedStreams`
"""
ds1 = self.stream1
ds2 = self.stream2
if isinstance(self.stream1, KeyedStream):
ds1 = self.stream1._origin_stream
if isinstance(self.stream2, KeyedStream):
ds2 = self.stream2._origin_stream
return ConnectedStreams(
ds1.key_by(key_selector1, key_type_info),
ds2.key_by(key_selector2, key_type_info))
def map(self, func: CoMapFunction, output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a CoMap transformation on a `ConnectedStreams` and maps the output to a common
type. The transformation calls a `CoMapFunction.map1` for each element of the first
input and `CoMapFunction.map2` for each element of the second input. Each CoMapFunction
call returns exactly one element.
:param func: The CoMapFunction used to jointly transform the two input DataStreams
:param output_type: `TypeInformation` for the result type of the function.
:return: The transformed `DataStream`
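Example (a minimal sketch; ``ds1`` and ``ds2`` are assumed to be existing DataStreams and
``MyCoMapFunction`` is a hypothetical user-defined CoMapFunction):
::
>>> class MyCoMapFunction(CoMapFunction):
...     def map1(self, value):
...         return value + 1
...     def map2(self, value):
...         return value * 2
>>> result = ds1.connect(ds2).map(MyCoMapFunction(), output_type=Types.INT())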
"""
if not isinstance(func, CoMapFunction):
raise TypeError("The input function must be a CoMapFunction!")
func_name = str(func)
# get connected stream
j_connected_stream = self.stream1._j_data_stream.connect(self.stream2._j_data_stream)
from pyflink.fn_execution import flink_fn_execution_pb2
j_operator, j_output_type = self._get_connected_stream_operator(
func,
output_type,
func_name,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.CO_MAP)
return DataStream(j_connected_stream.transform("Co-Map", j_output_type, j_operator))
def flat_map(self, func: CoFlatMapFunction, output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a CoFlatMap transformation on a `ConnectedStreams` and maps the output to a
common type. The transformation calls a `CoFlatMapFunction.flatMap1` for each element
of the first input and `CoFlatMapFunction.flatMap2` for each element of the second
input. Each CoFlatMapFunction call returns any number of elements including none.
:param func: The CoFlatMapFunction used to jointly transform the two input DataStreams
:param output_type: `TypeInformation` for the result type of the function.
:return: The transformed `DataStream`
"""
if not isinstance(func, CoFlatMapFunction):
raise TypeError("The input must be a CoFlatMapFunction!")
func_name = str(func)
# get connected stream
j_connected_stream = self.stream1._j_data_stream.connect(self.stream2._j_data_stream)
from pyflink.fn_execution import flink_fn_execution_pb2
j_operator, j_output_type = self._get_connected_stream_operator(
func,
output_type,
func_name,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.CO_FLAT_MAP)
return DataStream(j_connected_stream.transform("Co-Flat Map", j_output_type, j_operator))
def _get_connected_stream_operator(self, func: Union[Function, FunctionWrapper],
type_info: TypeInformation, func_name: str,
func_type: int):
gateway = get_gateway()
import cloudpickle
serialized_func = cloudpickle.dumps(func)
j_input_types1 = self.stream1._j_data_stream.getTransformation().getOutputType()
j_input_types2 = self.stream2._j_data_stream.getTransformation().getOutputType()
if type_info is None:
output_type_info = PickledBytesTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO()
else:
if isinstance(type_info, list):
output_type_info = RowTypeInfo(type_info)
else:
output_type_info = type_info
DataStreamPythonFunction = gateway.jvm.org.apache.flink.datastream.runtime.functions \
.python.DataStreamPythonFunction
j_python_data_stream_scalar_function = DataStreamPythonFunction(
func_name,
bytearray(serialized_func),
_get_python_env())
DataStreamPythonFunctionInfo = gateway.jvm. \
org.apache.flink.datastream.runtime.functions.python \
.DataStreamPythonFunctionInfo
j_python_data_stream_function_info = DataStreamPythonFunctionInfo(
j_python_data_stream_scalar_function,
func_type)
j_conf = gateway.jvm.org.apache.flink.configuration.Configuration()
DataStreamPythonFunctionOperator = gateway.jvm.org.apache.flink.datastream.runtime \
.operators.python.DataStreamTwoInputPythonStatelessFunctionOperator
j_python_data_stream_function_operator = DataStreamPythonFunctionOperator(
j_conf,
j_input_types1,
j_input_types2,
output_type_info.get_java_type_info(),
j_python_data_stream_function_info,
self._is_keyed_stream())
return j_python_data_stream_function_operator, output_type_info.get_java_type_info()
def _is_keyed_stream(self):
return isinstance(self.stream1, KeyedStream) and isinstance(self.stream2, KeyedStream)
|
|
import pytest, py
from _pytest.main import Session
class TestCollector:
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_compat_attributes(self, testdir, recwarn):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
recwarn.clear()
assert modcol.Module == pytest.Module
assert modcol.Class == pytest.Class
assert modcol.Item == pytest.Item
assert modcol.File == pytest.File
assert modcol.Function == pytest.Function
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if py.std.sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1,fn2,fn3:
assert fn != 3
assert fn != modcol
assert fn != [1,2,3]
assert [1,2,3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol("""
class TestClass:
def test_foo():
pass
""")
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(
testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
""")
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
class TestCollectFS:
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("_darcs", 'test_notfound.py')
tmpdir.ensure("CVS", 'test_notfound.py')
tmpdir.ensure("{arch}", 'test_notfound.py')
tmpdir.ensure(".whatever", 'test_notfound.py')
tmpdir.ensure(".bzr", 'test_notfound.py')
tmpdir.ensure("normal", 'test_found.py')
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
def test_custom_norecursedirs(self, testdir):
testdir.makeini("""
[pytest]
norecursedirs = mydir xyz*
""")
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
class TestCollectPluginHookRelay:
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_file(self, path, parent):
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == '.abc'
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback:
def test_collection_error(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
result = testdir.runpytest(p)
assert "__import__" not in result.stdout.str(), "too long traceback"
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*mport*not_exists*"
])
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*hello world*",
])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_make_collect_report(__multicall__):
rep = __multicall__.execute()
rep.headerlines += ["header1"]
return rep
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*header1*",
])
class TestCustomConftests:
def test_ignore_collect_path(self, testdir):
testdir.makeconftest("""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
""")
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest("""
def pytest_ignore_collect(path, config):
return True
""")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
assert "1 passed" in result.stdout.str()
result = testdir.runpytest()
assert result.ret == 0
assert "1 passed" not in result.stdout.str()
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest("""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
""")
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert "passed" not in result.stdout.str()
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
conf = testdir.makeconftest("""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
""")
sub = testdir.mkdir("sub")
p = testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines([
"*MyModule*",
"*test_x*"
])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest("""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
""")
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest("""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
""")
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines([
"*MyModule1*",
"*MyModule2*",
"*test_x*"
])
class TestSession:
def test_parsearg(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
testdir.chdir()
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
assert rcol.fspath == subdir
parts = rcol._parsearg(p.basename)
assert parts[0] == target
assert len(parts) == 1
parts = rcol._parsearg(p.basename + "::test_func")
assert parts[0] == target
assert parts[1] == "test_func"
assert len(parts) == 2
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to inline_genitems? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
assert topdir == rcol.fspath
rootid = rcol.nodeid
#root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
#assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
topdir = testdir.tmpdir
items, hookrec = testdir.inline_genitems(id)
item, = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
py.std.pprint.pprint(hookrec.hookrecorder.calls)
hookrec.hookrecorder.contains([
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
("pytest_collectreport", "report.nodeid == '.'")
])
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile("""
class TestClass:
def test_method(self):
pass
""")
normid = p.basename + "::TestClass::()::test_method"
for id in [p.basename,
p.basename + "::TestClass",
p.basename + "::TestClass::()",
normid,
]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest("""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
""" % p.basename)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
py.std.pprint.pprint(hookrec.hookrecorder.calls)
assert len(items) == 2
hookrec.hookrecorder.contains([
("pytest_collectstart",
"collector.fspath == collector.session.fspath"),
("pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'"),
("pytest_collectstart",
"collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
#("pytest_collectreport",
# "report.fspath == %r" % str(rcol.fspath)),
])
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
py.std.pprint.pprint(hookrec.hookrecorder.calls)
hookrec.hookrecorder.contains([
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport",
"report.nodeid.startswith('aaa/test_aaa.py')"),
])
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
py.std.pprint.pprint(hookrec.hookrecorder.calls)
hookrec.hookrecorder.contains([
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
])
def test_serialization_byid(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
item, = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
item2, = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile("""
class TestClass:
def test_method(self):
pass
""")
arg = p.basename + ("::TestClass::test_method")
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
item, = items
assert item.nodeid.endswith("TestClass::()::test_method")
class Test_getinitialnodes:
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == 'x.py'
assert col.parent.name == testdir.tmpdir.basename
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
testdir.chdir()
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == 'subdir/x.py'
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems:
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile("""
def test_1():
pass
def test_2():
pass
""")
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile('''
def testone():
pass
class TestX:
def testmethod_one(self):
pass
class TestY(TestX):
pass
''')
items, reprec = testdir.inline_genitems(p)
assert len(items) == 3
assert items[0].name == 'testone'
assert items[1].name == 'testmethod_one'
assert items[2].name == 'testmethod_one'
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_matchnodes_two_collections_same_file(testdir):
testdir.makeconftest("""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2:
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
""")
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*",
])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines([
"*1 passed*",
])
class TestNodekeywords:
def test_no_under(self, testdir):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
l = list(modcol.keywords)
assert modcol.name in l
for x in l:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile("""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
""")
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
|
|
"""
[email protected]
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from biobeam import Bpm3d
from gputools import convolve_spatial3
from collections import namedtuple
from six.moves import range
from functools import reduce
def _perm_inverse(perm):
inverse = [0] * len(perm)
for i, p in enumerate(perm):
inverse[p] = i
return inverse
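# A quick worked example of _perm_inverse with the permutation used below
# (perm_illum = (2, 0, 1)); the inverse maps each axis back to its source,
# so composing the two gives back the identity ordering:
#
#     >>> _perm_inverse((2, 0, 1))
#     [1, 2, 0]
#     >>> [(2, 0, 1)[p] for p in _perm_inverse((2, 0, 1))]
#     [0, 1, 2]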
class SimLSM_Base(object):
_GridSaveObject = namedtuple("GridSave",["grid_dim","u0"])
perm_illum = (2,0,1)
perm_illum_inv= _perm_inverse(perm_illum)
def __init__(self, dn = None,
signal = None,
shape = None,
size = None,
units = None,
lam_illum = .5,
NA_illum = .1,
lam_detect = .5,
NA_detect = .7,
n0 = 1.33,
n_volumes = 1,
zfoc_illum = None,
simul_xy_illum = None,
simul_z_illum = 1,
simul_xy_detect = None,
simul_z_detect = 1,
fftplan_kwargs = {}):
self.dn = dn
self.signal = signal
self._bpm_illum = Bpm3d(
size = self._trans_illum(size, shape_style="xyz"),
shape = self._trans_illum(shape, shape_style="xyz"),
dn = self._trans_illum(dn, copy = True),
units = self._trans_illum(units, shape_style="xyz"),
lam = lam_illum,
simul_xy=simul_xy_illum,
simul_z=simul_z_illum,
n_volumes=n_volumes,
n0 = n0,
fftplan_kwargs=fftplan_kwargs)
self._bpm_detect = Bpm3d(size = size,
shape = shape,
dn = dn,
units = units,
lam = lam_detect,
simul_xy=simul_xy_detect,
simul_z=simul_z_detect,
n_volumes=n_volumes,
n0 = n0,
fftplan_kwargs=fftplan_kwargs)
self.NA_illum = NA_illum
self.NA_detect = NA_detect
self.zfoc_illum = zfoc_illum
self.dn = self._bpm_detect.dn
self.units = self._bpm_detect.units
self.Nx, self.Ny, self.Nz = self._bpm_detect.shape
self.size = self._bpm_detect.size
self._last_grid_u0 = self._GridSaveObject(None,None)
self._prepare_u0_all()
def _trans_illum(self,obj, inv = False, copy=False, shape_style="zyx"):
"""handles the transformation between illumination and detection coords and shapes
_trans_illum(dn).shape is volume shape in illumination space
"""
perm = self.perm_illum_inv if inv else self.perm_illum
if obj is None:
return None
if isinstance(obj,np.ndarray):
if copy:
return obj.transpose(perm).copy()
else:
return obj.transpose(perm)
if isinstance(obj,(list, tuple)):
if shape_style=="zyx":
return type(obj)([obj[p] for p in perm])
elif shape_style=="xyz":
return type(obj)([obj[::-1][p] for p in perm])[::-1]
else:
raise NotImplementedError()
else:
raise NotImplementedError()
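    # Example of the axis bookkeeping done by _trans_illum: a detection-space
    # volume stored as (z, y, x) with shape (Nz, Ny, Nx) becomes (Nx, Nz, Ny)
    # in illumination space, and inv=True undoes the transpose:
    #
    #     >>> dn = np.zeros((4, 5, 6))                     # (Nz, Ny, Nx)
    #     >>> dn.transpose(SimLSM_Base.perm_illum).shape
    #     (6, 4, 5)
    #     >>> dn.transpose(SimLSM_Base.perm_illum).transpose(
    #     ...     SimLSM_Base.perm_illum_inv).shape
    #     (4, 5, 6)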
def _prepare_u0_illum(self, zfoc = None ):
raise NotImplementedError()
def _prepare_u0_all(self):
self.u0_detect = self._bpm_detect.u0_beam(NA = self.NA_detect, zfoc = 0.)
self._prepare_u0_illum(self.zfoc_illum)
def propagate_illum(self,cz = 0, **bpm_kwargs):
"""cz in microns from center axis"""
# the illumination pattern is shifted
bpm_kwargs.update({"return_comp":"intens"})
offset = int(cz/self._bpm_illum.dy)
assert abs(offset)<= self.u0_illum.shape[0]//2
print("offset: ",offset)
u0 = np.roll(self.u0_illum, offset ,axis=0)
u = self._bpm_illum.propagate(u0,**bpm_kwargs)
return self._trans_illum(u, inv = True)
def psf(self, c = [0,0,0], zslice = 16, with_sheet = False, **bpm_kwargs):
"""
c = [0,10,-10] relative to center in microns
c = [cz,cy,cx]
"""
        u0 = np.roll(np.roll(self.u0_detect,int(np.round(c[1]/self._bpm_detect.dy)),axis=0),
                     int(np.round(c[2]/self._bpm_detect.dx)),axis=1)
offset_z = int(np.round(c[0]/self._bpm_detect.units[-1]))
u1 = self._bpm_detect.propagate(u0 = u0, offset=self.Nz//2+offset_z,
return_shape="last",**bpm_kwargs)
#refocus
u2 = self._bpm_detect.propagate(u0 = u1.conjugate(),
free_prop=True,
#offset=Nz//2+c[0],
return_shape="full",
return_comp="intens",
**bpm_kwargs)[::-1]
if with_sheet:
sheet = self.propagate_illum(c[0], **bpm_kwargs)
u2 *= sheet
if zslice is None:
return u2
else:
u2 = np.roll(u2,-offset_z,axis=0)[self.Nz//2-zslice:self.Nz//2+zslice]
return u2
def psf_grid_z(self, cz = 0, grid_dim = (4,4), zslice = 16,
with_sheet = False,
**bpm_kwargs):
"""cz in microns relative to the center
"""
print("computing psf grid %s..."%(str(grid_dim)))
offset_z = int(np.round(cz/self._bpm_detect.units[-1]))
n_y, n_x = grid_dim
Nb_x, Nb_y = self._bpm_detect.simul_xy[0]/n_x, self._bpm_detect.simul_xy[1]/n_y
# get the offset positions
xs = np.round([-self._bpm_detect.simul_xy[0]//2+n*Nb_x+Nb_x//2 for n in range(n_x)]).astype(np.int)
ys = np.round([-self._bpm_detect.simul_xy[1]//2+n*Nb_y+Nb_y//2 for n in range(n_y)]).astype(np.int)
        # this is expensive, so memoize it if we use it several times after
        # (note: the 'and False' in the check below currently disables the cached path)
if self._last_grid_u0.grid_dim == grid_dim and False:
print("using saved grid")
u0 = self._last_grid_u0.u0
else:
#u0 = np.sum([np.roll(np.sum([np.roll(self.u0_detect,_y,axis=0) for _y in ys],axis=0),_x, axis=1) for _x in xs],axis=0)
u0_y = reduce(np.add,[np.roll(self.u0_detect,_y,axis=0) for _y in ys])
u0 = reduce(np.add,[np.roll(u0_y,_x,axis=1) for _x in xs])
self._last_grid_u0 = self._GridSaveObject(grid_dim,u0)
u0 = self._bpm_detect.propagate(u0 = u0, offset=self.Nz//2+offset_z,
return_shape="last",
return_comp="field",
**bpm_kwargs)
bpm_kwargs.update({"free_prop":True})
#refocus
u = self._bpm_detect.propagate(u0 = u0.conjugate(),
#offset=Nz//2+c[0],
return_shape="full",
return_comp="intens",
**bpm_kwargs)[::-1]
if with_sheet:
sheet = self.propagate_illum(cz, **bpm_kwargs)
u *= sheet
if zslice is None:
return u
else:
u = np.roll(u,-offset_z,axis=0)[self.Nz//2-zslice:self.Nz//2+zslice]
#u = np.roll(u,offset_z,axis=0)[self.Nz//2-zslice:self.Nz//2+zslice][::-1]
return u
def psf_grid_z_stepped(self, cz = 0,
grid_dim = (4,4),
zslice = 16,
with_sheet = False,
step = (1,1),
offset = (0,0),
**bpm_kwargs):
"""cz in microns relative to the center
"""
print("computing psf grid %s..."%(str(grid_dim)))
offset_z = int(np.round(cz/self._bpm_detect.units[-1]))
n_y, n_x = grid_dim
Nb_x, Nb_y = self._bpm_detect.simul_xy[0]/n_x, self._bpm_detect.simul_xy[1]/n_y
# get the offset positions
xs = np.round([-self._bpm_detect.simul_xy[0]//2+n*Nb_x+Nb_x//2 for n in range(offset[1], n_x,step[1])]).astype(np.int)
ys = np.round([-self._bpm_detect.simul_xy[1]//2+n*Nb_y+Nb_y//2 for n in range(offset[0], n_y,step[0])]).astype(np.int)
#u0 = np.sum([np.roll(np.sum([np.roll(self.u0_detect,_y,axis=0) for _y in ys],axis=0),_x, axis=1) for _x in xs],axis=0)
u0_y = reduce(np.add,[np.roll(self.u0_detect,_y,axis=0) for _y in ys])
u0 = reduce(np.add,[np.roll(u0_y,_x,axis=1) for _x in xs])
self._last_grid_u0 = self._GridSaveObject(grid_dim,u0)
u0 = self._bpm_detect.propagate(u0 = u0, offset=self.Nz//2+offset_z,
return_shape="last",
return_comp="field",
**bpm_kwargs)
bpm_kwargs.update({"free_prop":True})
#refocus
u = self._bpm_detect.propagate(u0 = u0.conjugate(),
#offset=Nz//2+c[0],
return_shape="full",
return_comp="intens",
**bpm_kwargs)[::-1]
if with_sheet:
sheet = self.propagate_illum(cz, **bpm_kwargs)
u *= sheet
if zslice is None:
return u
else:
u = np.roll(u,-offset_z,axis=0)[self.Nz//2-zslice:self.Nz//2+zslice]
#u = np.roll(u,offset_z,axis=0)[self.Nz//2-zslice:self.Nz//2+zslice][::-1]
return u
def simulate_image_z(self, cz = 0,
signal = None,
psf_grid_dim = (8,8),
zslice = 16,
conv_sub_blocks = (1,1),
conv_pad_factor = 2,
conv_mode = "wrap",
mode = "product",
with_sheet = True,
**bpm_kwargs):
"""
mode = ["product","illum"]
"""
if not mode in ["product","illum"]:
raise KeyError("unknown mode: %s"%mode)
if signal is None:
signal = self.signal
if signal is None:
raise ValueError("no signal defined (signal)!")
# illumination
psfs = self.psf_grid_z(cz = cz,
grid_dim=psf_grid_dim,
zslice=zslice,
**bpm_kwargs)
offset_z = int(np.round(cz/self._bpm_detect.units[-1]))
assert offset_z+zslice<self.Nz and self.Nz//2+offset_z-zslice>=0
s = slice(self.Nz//2+offset_z-zslice,self.Nz//2+offset_z+zslice)
signal = 1.*signal[s].copy()
if with_sheet:
print("illuminating at z= %s mu with psf mode %s" % (cz, mode))
u = self.propagate_illum(cz = cz,**bpm_kwargs)
            if mode == "product":
psfs = psfs*u[s]
else:
signal = signal*u[s]
print("spatially varying convolution: %s %s"%(signal.shape,psfs.shape))
#convolve
conv = convolve_spatial3(signal.copy(), psfs.copy(),
grid_dim = (1,)+psf_grid_dim,
sub_blocks=(1,)+conv_sub_blocks,
pad_factor=conv_pad_factor,
mode = conv_mode, verbose = True)
#return u, self.signal[s].copy(), signal, psfs, conv
return conv
if __name__ == '__main__':
pass
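# Rough usage sketch (hypothetical; SimLSM_Base is abstract, and the subclass
# name, arrays and parameter values below are made up for illustration).
# A concrete simulator has to provide _prepare_u0_illum and set self.u0_illum
# before propagate_illum()/psf()/simulate_image_z() can be used:
#
#     class SimLSM_Sheet(SimLSM_Base):
#         def _prepare_u0_illum(self, zfoc=None):
#             # simplest choice: reuse the same beam helper as the detection arm
#             self.u0_illum = self._bpm_illum.u0_beam(
#                 NA=self.NA_illum, zfoc=0. if zfoc is None else zfoc)
#
#     # dn and signal assumed to be (Nz, Ny, Nx) arrays of refractive index
#     # contrast and fluorophore density:
#     m = SimLSM_Sheet(dn=dn, signal=signal,
#                      shape=dn.shape[::-1], units=(0.2, 0.2, 0.2))
#     img = m.simulate_image_z(cz=0, psf_grid_dim=(8, 8), zslice=16)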
|
|
#!/usr/bin/python
import glob
import argparse
import numpy as np
from dicom_tools.pyqtgraph.Qt import QtCore, QtGui
import dicom_tools.pyqtgraph as pg
import dicom
from dicom_tools.FileReader import FileReader
#from dicom_tools.read_files import read_files
from scipy import ndimage
import os
from dicom_tools.roiFileHandler import roiFileHandler
from dicom_tools.MyStatusBar import MyStatusBar
from dicom_tools.roi2myroi import roi2myroi
# class Window(QtGui.QWidget):
class Window_dicom_roi2(QtGui.QMainWindow):
def __init__(self):
# QtGui.QWidget.__init__(self)
super(Window_dicom_roi2, self).__init__()
# self.setGeometry(50, 50, 500, 300)
self.setWindowTitle("DICOM roi (v3)")
# self.setWindowIcon(QtGui.QIcon('pythonlogo.png'))
widgetWindow = QtGui.QWidget(self)
self.setCentralWidget(widgetWindow)
outfname="roi.txt"
self.inpath="."
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true")
        parser.add_argument("-r", "--raw", help="don't read raw data",
action="store_true")
parser.add_argument("-i", "--inputpath", help="path of the DICOM directory (default ./)")
parser.add_argument("-o", "--outfile", help="define output file name (default roi.txt)")
parser.add_argument("-l", "--layer", help="select layer",
type=int)
parser.add_argument("-f", "--filterROI", help="filter the image with a ROI (folder path, nrrd file supported)")
group = parser.add_mutually_exclusive_group()
group.add_argument("-y", "--yview", help="swap axes",
action="store_true")
group.add_argument("-x", "--xview", help="swap axes",
action="store_true")
args = parser.parse_args()
self.layer=0
if args.outfile:
outfname = args.outfile
if args.inputpath:
self.inpath = args.inputpath
if args.layer:
self.layer = args.layer
self.raw = not args.raw
openFile = QtGui.QAction("&Open ROI File", self)
openFile.setShortcut("Ctrl+O")
openFile.setStatusTip('Open ROI File')
openFile.triggered.connect(self.file_open)
saveFile = QtGui.QAction("&Save ROI on File", self)
saveFile.setShortcut("Ctrl+S")
saveFile.setStatusTip('Save ROI on File')
saveFile.triggered.connect(self.file_save)
# self.statusBar()
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('&ROI')
# fileMenu.addAction(extractAction)
fileMenu.addAction(openFile)
fileMenu.addAction(saveFile)
self.verbose = args.verbose
self.filterROI = args.filterROI
freader = FileReader(self.inpath, args.filterROI, args.verbose)
# dataRGB, unusedROI = read_files(inpath, False, args.verbose, False)
if self.raw:
dataBW, ROI = freader.read(True)
self.dataZ = dataBW#[:,:,::-1]
else:
dataRGB, ROI = freader.read(False)
self.dataZ = dataRGB#[:,:,:,0]
#
# self.data = dataRGB[:,:,::-1,:]
#dataswappedX = np.swapaxes(np.swapaxes(self.data,0,1),1,2)
self.dataswappedX = np.swapaxes(np.swapaxes(self.dataZ,0,1),1,2)[:,::-1,::-1]
self.dataswappedY = np.swapaxes(self.dataZ,0,2)[:,:,::-1]
self.ConstPixelDims = freader.ConstPixelDims
self.ConstPixelSpacing = freader.ConstPixelSpacing
if args.verbose:
print(self.dataZ.shape)
print("layer: ",self.layer)
self.xview = args.xview
self.yview = args.yview
self.img1a = pg.ImageItem()
self.arr = None
self.firsttime = True
if self.xview:
imgScaleFactor= 1./freader.scaleFactor
self.data = self.dataswappedX
self.ROI = np.swapaxes(np.swapaxes(ROI,0,1),1,2)[:,::-1,::-1]
self.xscale = self.ConstPixelSpacing[1]
self.yscale = self.ConstPixelSpacing[2]
elif self.yview:
imgScaleFactor= 1./freader.scaleFactor
self.data = self.dataswappedY
self.ROI = np.swapaxes(ROI,0,2)[:,:,::-1]
self.xscale = self.ConstPixelSpacing[0]
self.yscale = self.ConstPixelSpacing[2]
else:
imgScaleFactor= 1.
self.data = self.dataZ
self.ROI = ROI
self.xscale = self.ConstPixelSpacing[0]
self.yscale = self.ConstPixelSpacing[1]
self.xshift= 0
self.yshift=0
if self.verbose:
print("data len:",len(self.data))
self.updatemain()
self.rois = [None]*len(self.data)
self.button_next = QtGui.QPushButton('Next', self)
self.button_prev = QtGui.QPushButton('Prev', self)
self.button_next.clicked.connect(self.nextimg)
self.button_prev.clicked.connect(self.previmg)
# layout = QtGui.QVBoxLayout(self)
# layout = QtGui.QGridLayout(self)
layout = QtGui.QGridLayout(widgetWindow)
layout.addWidget(self.button_next,1,1)
layout.addWidget(self.button_prev,2,1)
self.button_setroi = QtGui.QPushButton('Set ROI', self)
self.button_setroi.clicked.connect(self.setROI)
layout.addWidget(self.button_setroi,12,1)
self.button_delroi = QtGui.QPushButton('Del ROI', self)
self.button_delroi.clicked.connect(self.delROI)
layout.addWidget(self.button_delroi,13,1)
label = QtGui.QLabel("Click on a line segment to add a new handle. Right click on a handle to remove.")
# label.setAlignment(Qt.AlignCenter)
layout.addWidget(label,0,0)
self.label_layer = QtGui.QLabel("layer: "+str(self.layer+1)+"/"+str(len(self.data)))
self.label_shape = QtGui.QLabel("shape: "+str(self.arr.shape))
self.label_size = QtGui.QLabel("size: "+str(self.arr.size))
self.label_min = QtGui.QLabel("min: "+str(self.arr.min()))
self.label_max = QtGui.QLabel("max: "+str(self.arr.max()))
self.label_mean = QtGui.QLabel("mean: "+str(self.arr.mean()))
        self.label_sd = QtGui.QLabel("sd: "+str(ndimage.standard_deviation(self.arr)))
self.label_sum = QtGui.QLabel("sum: "+str(ndimage.sum(self.arr)))
layout.addWidget(self.label_layer,3,1)
layout.addWidget(self.label_shape,4,1)
layout.addWidget(self.label_size,5,1)
layout.addWidget(self.label_min,6,1)
layout.addWidget(self.label_max,7,1)
layout.addWidget(self.label_mean,8,1)
layout.addWidget(self.label_sd,9,1)
layout.addWidget(self.label_sum,10,1)
self.roisSetted = 0
        self.label2_roisSetted = QtGui.QLabel("ROIs set: 0")
self.label2_shape = QtGui.QLabel()
self.label2_size = QtGui.QLabel()
self.label2_min = QtGui.QLabel()
self.label2_max = QtGui.QLabel()
self.label2_mean = QtGui.QLabel()
self.label2_sd = QtGui.QLabel()
self.label2_sum = QtGui.QLabel()
layout.addWidget(self.label2_roisSetted,14,1)
layout.addWidget(self.label2_shape,15,1)
layout.addWidget(self.label2_size,16,1)
layout.addWidget(self.label2_min,17,1)
layout.addWidget(self.label2_max,18,1)
layout.addWidget(self.label2_mean,19,1)
layout.addWidget(self.label2_sd,20,1)
layout.addWidget(self.label2_sum,21,1)
self.p1 = pg.PlotWidget()
self.p1.setAspectLocked(True,imgScaleFactor)
self.p1.addItem(self.img1a)
# imv = pg.ImageView(imageItem=img1a)
layout.addWidget(self.p1,1,0,10,1)
# self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slider = QtGui.QScrollBar(QtCore.Qt.Horizontal)
self.slider.setMinimum(1)
self.slider.setMaximum(len(self.data))
self.slider.setValue(self.layer+1)
self.slider.setSingleStep(1)
self.slider.setFocus()
self.slider.setFocusPolicy(QtCore.Qt.StrongFocus)
# self.slider.setTickPosition(QtGui.QSlider.TicksBelow)
# self.slider.setTickInterval(5)
# self.slider.sliderMoved.connect(self.slider_jump_to)
self.slider.valueChanged.connect(self.slider_jump_to)
layout.addWidget(self.slider,11,0)
self.statusBar = MyStatusBar()
self.statusBar.setSize(len(self.data))
layout.addWidget(self.statusBar,12,0)
self.img1b = pg.ImageItem()
# self.img1a.scale(self.xscale, self.yscale)
# self.img1a.translate(self.xshift, self.yshift)
# self.img1b.scale(self.xscale, self.yscale)
# self.img1b.translate(self.xshift, self.yshift)
if not args.filterROI:
self.roi = pg.PolyLineROI([[80, 60], [90, 30], [60, 40]], pen=(6,9), closed=True)
else:
self.rois = roi2myroi(self.ROI)
if self.rois[self.layer]:
self.roi = self.rois[self.layer]
else:
self.roi = pg.PolyLineROI([[80, 60], [90, 30], [60, 40]], pen=(6,9), closed=True)
# for simplex in hull.simplices:
# if self.rois[self.layer]:
# self.roi = self.rois[self.layer]
self.p2 = pg.PlotWidget()
# self.p2.disableAutoRange('xy')
self.p2.setAspectLocked(True,imgScaleFactor)
self.p2.addItem(self.img1b)
# if not args.filterROI:
self.p1.addItem(self.roi)
self.roi.sigRegionChanged.connect(self.update)
layout.addWidget(self.p2,13,0,10,1)
def update(self):
# if not self.filterROI:
thisroi = self.roi.getArrayRegion(self.arr, self.img1a).astype(float)
self.img1b.setImage(thisroi, levels=(0, self.arr.max()))
self.label2_shape.setText("shape: "+str(thisroi.shape))
self.label2_size.setText("size: "+str(thisroi.size))
self.label2_min.setText("min: "+str(thisroi.min()))
self.label2_max.setText("max: "+str(thisroi.max()))
self.label2_mean.setText("mean: "+str(thisroi.mean()))
self.label2_sd.setText("sd: "+str( ndimage.standard_deviation(thisroi) ))
self.label2_sum.setText("sum: "+str( ndimage.sum(thisroi) ))
# self.img1b.scale(self.xscale, self.yscale)
# self.img1b.translate(self.xshift, self.yshift)
# else:
# self.img1b.setImage(self.data[self.layer,:,:,0]*self.ROI[self.layer])
# # print("entropy: ",entropy(thisroi, disk(5))
# # print("maximum: ",maximum(thisroi, disk(5))
# # print("\n"
# # print(disk(5)
# print("\n")
self.p2.autoRange()
def updatemain(self):
if self.verbose:
print "updating",self.layer
if self.xview:
# dataswappedX = np.swapaxes(self.data,0,1)
self.arr=self.dataswappedX[self.layer]
elif self.yview:
# dataswappedY = np.swapaxes(self.data,0,2)
self.arr=self.dataswappedY[self.layer]
else:
self.arr=self.data[self.layer]
self.img1a.setImage(self.arr)
if self.firsttime:
self.firsttime = False
else:
if self.verbose:
print self.rois
if self.rois[self.layer]:
# self.p1.removeItem(self.roi)
# self.restorePolyLineState(self.roi, self.rois[self.layer])
self.roi.setState(self.rois[self.layer])
# self.p1.addItem(self.roi)
self.update()
self.label_layer.setText("layer: "+str(self.layer+1)+"/"+str(len(self.data)))
self.img1a.updateImage()
def nextimg(self):
if self.layer < (len(self.data)-1):
self.layer +=1
self.slider.setValue(self.layer+1)
self.updatemain()
def previmg(self):
if self.layer > 0:
self.layer -=1
self.slider.setValue(self.layer+1)
self.updatemain()
def setROI(self):
# self.rois[self.layer] = self.savePolyLineState(self.roi)
self.rois[self.layer] = self.roi.saveState()
self.roisSetted += 1
        self.label2_roisSetted.setText("ROIs set: "+str(self.roisSetted))
def delROI(self):
if self.rois[self.layer]:
self.rois[self.layer] = None
self.roisSetted -= 1
            self.label2_roisSetted.setText("ROIs set: "+str(self.roisSetted))
def file_save(self):
filename = QtGui.QFileDialog.getSaveFileName(self, 'Save File')
writer = roiFileHandler()
writer.dicomsPath = os.path.abspath(self.inpath)
if not str(filename).endswith('.myroi'):
filename = filename+".myroi"
writer.write(filename, self.rois, self.roisSetted)
def file_open(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File','ROI','ROI files (*.myroi)')
reader = roiFileHandler()
originalpath = reader.dicomsPath
self.rois, self.roisSetted = reader.read(filename)
self.updatemain()
        self.label2_roisSetted.setText("ROIs set: "+str(self.roisSetted))
def slider_jump_to(self):
self.layer = self.slider.value()-1
self.updatemain()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window_dicom_roi2()
window.show()
sys.exit(app.exec_())
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.messages import CTransaction, sha256
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, hex_str_to_bytes
from io import BytesIO
class DecodeScriptTest(BitsendTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
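        # quick reference for the push prefixes used in these tests:
        # '48' pushes 0x48 = 72 bytes (a DER signature plus its sighash byte),
        # '21' pushes 0x21 = 33 bytes (a compressed public key),
        # '41' pushes 0x41 = 65 bytes (an uncompressed public key),
        # '14' pushes 0x14 = 20 bytes (a HASH160 pubkey hash or script hash).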
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
push_public_key_hash = '14' + public_key_hash
uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
push_uncompressed_public_key = '41' + uncompressed_public_key
p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# P2PK is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# P2PKH is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
multisig_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(multisig_script)))
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
        # P2SH does not work in segwit scripts. decodescript should not return a result for it.
assert 'segwit' not in rpc_result
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
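        # byte-by-byte, that hex reads:
        #   63                OP_IF
        #   21 <33-byte key>  push the receiver pubkey
        #   ad                OP_CHECKSIGVERIFY
        #   67                OP_ELSE
        #   03 20a107         push 3 bytes, little-endian 0x07a120 = 500000
        #   b1                OP_CHECKLOCKTIMEVERIFY (OP_NOP2)
        #   75                OP_DROP
        #   68                OP_ENDIF
        #   21 <33-byte key>  push the sender pubkey
        #   ac                OP_CHECKSIG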
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
cltv_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(cltv_script)))
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 8) multisig scriptPubKey with an uncompressed pubkey
# <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
# with an uncompressed pubkey in them.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key +'52ae')
assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 9) P2WPKH scriptpubkey
# 0 <PubKeyHash>
rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
assert_equal('0 ' + public_key_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
# 10) P2WSH scriptpubkey
# 0 <ScriptHash>
# even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
# for the purpose of this test.
rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
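        # the trailing sighash byte is what decoderawtransaction renders in brackets:
        # 0x01 -> [ALL], while 0x82 = SIGHASH_NONE (0x02) | SIGHASH_ANYONECANPAY (0x80)
        # -> [NONE|ANYONECANPAY]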
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
        # 3) test a scriptSig that contains more than just push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
|
|
"""
Import the given MODIS tile into the provided NDVI and QA worldgrid.
Assert that the MODIS tile contains (at least) the requested dates.
Note that this requires a .csv file with NDVI dates to import. This file
can be created with the ``ndvi_collect_dates.py`` script.
Example invocation::
python rastercube/scripts/create_ndvi_worldgrid.py
--tile=h10v09
--worldgrid=hdfs:///user/test/
--dates_csv=$RASTERCUBE_TEST_DATA/1_manual/ndvi_dates.2.csv
"""
import os
import sys
import time
import argparse
import warnings
import ctypes
import numpy as np
import multiprocessing.sharedctypes
import rastercube.utils as utils
import rastercube.datasources.modis as modis
import rastercube.jgrid as jgrid
import rastercube.worldgrid.grids as grids
parser = argparse.ArgumentParser(description="Create a new NDVI worldgrid")
parser.add_argument('--tile', type=str, required=True,
help='tile name (e.g. h17v07)')
parser.add_argument('--noconfirm', action='store_true',
help='Skip confirmation')
parser.add_argument('--modis_dir', type=str, required=False,
help='directory where input MODIS files are stored')
parser.add_argument('--worldgrid', type=str, required=True,
help='worldgrid root')
# If we have fractions of 400x400x50 and store int16, we get
# 400 * 400 * 50 * 2 / (1024 * 1024.) = 15MB
parser.add_argument('--frac_ndates', type=int, default=50,
help='Size of a chunk along the time axis')
parser.add_argument('--nworkers', type=int, default=5,
help='Number of workers (if using multiprocessing)')
parser.add_argument('--dates_csv', type=str, default=None,
                    help='The dates that must be included in the grid, '
'see scripts/ndvi_collect_dates.py')
parser.add_argument('--test_limit_fractions', type=int, default=None,
help='(TESTING ONLY) : Only create the first n fractions')
def collect_hdf_files(tilename, hdf_dir):
# hdf_files contains (full path, timestamp_ms)
hdf_files = modis.ndvi_hdf_for_tile(tilename, hdf_dir)
assert len(hdf_files) > 0, 'No matching HDF files found'
print len(hdf_files), ' HDF files in srcdir'
return hdf_files
# ------------------------------------- Shared multiprocessing globals
# Global variables initialized by _mp_init
_mp_ndvi = None
_mp_qa = None
def _mp_init(shared_ndvi, shared_qa):
global _mp_ndvi, _mp_qa
_mp_ndvi = shared_ndvi
_mp_qa = shared_qa
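# The sharing pattern used below: the parent process allocates flat RawArrays,
# hands them to every worker via the Pool initializer above, and each worker
# re-views them as numpy arrays of the right shape/dtype without copying
# (see _real_mp_process_hdf / _real_mp_write_frac). Minimal sketch of the idea
# (ny, nx are placeholder sizes):
#
#     shared = multiprocessing.sharedctypes.RawArray(ctypes.c_short, ny * nx)
#     pool = multiprocessing.Pool(initializer=_mp_init, initargs=(shared, shared))
#     # ...then, inside a worker process:
#     arr = np.ctypeslib.as_array(_mp_ndvi)   # zero-copy view on `shared`
#     arr.shape = (ny, nx)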
# ------------------------------------- Multiprocess HDF processing
def _real_mp_process_hdf(hdf_file, frac_ti, grid_w, grid_h, frac_ndates):
"""
Args:
frac_ti: The time index of the hdf_file in the current frac array
"""
# ignore the PEP 3118 buffer warning
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
s_ndvi = np.ctypeslib.as_array(_mp_ndvi)
s_ndvi.shape = (grid_h, grid_w, frac_ndates)
s_ndvi.dtype = np.int16
s_qa = np.ctypeslib.as_array(_mp_qa)
s_qa.shape = (grid_h, grid_w, frac_ndates)
s_qa.dtype = np.uint16
_start = time.time()
modhdf = modis.ModisHDF(hdf_file)
# -- ndvi
_ndvi_start = time.time()
ds = modhdf.load_gdal_dataset(modis.MODIS_NDVI_DATASET_NAME)
ds.ReadAsArray(buf_obj=s_ndvi[:, :, frac_ti])
_ndvi_elapsed = time.time() - _ndvi_start
del ds
# -- qa
_qa_start = time.time()
ds = modhdf.load_gdal_dataset(modis.MODIS_QA_DATASET_NAME)
ds.ReadAsArray(buf_obj=s_qa[:, :, frac_ti])
_qa_elapsed = time.time() - _qa_start
del ds
print 'Loading ', os.path.basename(hdf_file),\
'took %.02f [s] (%.02f ndvi read, %.02f qa)' % (
time.time() - _start, _ndvi_elapsed, _qa_elapsed)
sys.stdout.flush()
def _mp_process_hdf(args):
"""
    Wrapper around _real_mp_process_hdf that correctly handles keyboard
    interrupt
"""
# TODO: This is supposed to make CTRL-C work but it doesn't
try:
_real_mp_process_hdf(*args)
except (KeyboardInterrupt, SystemExit):
print "Worker interrupted, exiting..."
return False
# ------------------------------------- Multiprocess fractions writing
def _real_mp_write_frac(frac_id, grid_w, grid_h, frac_ndates):
# ignore the PEP 3118 buffer warning
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
s_ndvi = np.ctypeslib.as_array(_mp_ndvi)
s_ndvi.shape = (grid_h, grid_w, frac_ndates)
s_ndvi.dtype = np.int16
s_qa = np.ctypeslib.as_array(_mp_qa)
s_qa.shape = (grid_h, grid_w, frac_ndates)
s_qa.dtype = np.uint16
frac_num, frac_d = frac_id
i_range, j_range = modgrid.get_cell_indices_in_tile(
frac_num, tile_h, tile_v)
frac_ndvi = s_ndvi[i_range[0]:i_range[1], j_range[0]:j_range[1], :]
frac_qa = s_qa[i_range[0]:i_range[1], j_range[0]:j_range[1], :]
ndvi_header.write_frac(frac_id, frac_ndvi)
qa_header.write_frac(frac_id, frac_qa)
def _mp_write_frac(args):
try:
_real_mp_write_frac(*args)
except (KeyboardInterrupt, SystemExit):
print "Worker interrupted, exiting..."
return False
if __name__ == '__main__':
# Print help if no arguments are provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
tilename = args.tile
modis_dir = args.modis_dir
if modis_dir is None:
modis_dir = utils.get_modis_hdf_dir()
test_limit_fractions = args.test_limit_fractions
nworkers = args.nworkers
worldgrid = args.worldgrid
ndvi_grid_root = os.path.join(worldgrid, 'ndvi')
qa_grid_root = os.path.join(worldgrid, 'qa')
if not jgrid.Header.exists(ndvi_grid_root):
assert args.dates_csv is not None
dates = np.genfromtxt(args.dates_csv, dtype=str)
dates_ms = sorted([utils.parse_date(d) for d in dates])
grid = grids.MODISGrid()
ndvi_header = jgrid.Header(
grid_root=ndvi_grid_root,
width=grid.width,
height=grid.height,
frac_width=grid.cell_width,
frac_height=grid.cell_height,
frac_ndates=args.frac_ndates,
sr_wkt=grid.proj_wkt,
dtype=np.int16,
geot=grid.geot,
shape=(grid.height, grid.width, len(dates_ms)),
timestamps_ms=dates_ms,
nodataval=modis.MODIS_NDVI_NODATA
)
ndvi_header.save()
qa_header = jgrid.Header(
grid_root=qa_grid_root,
width=grid.width,
height=grid.height,
frac_width=grid.cell_width,
frac_height=grid.cell_height,
frac_ndates=args.frac_ndates,
sr_wkt=grid.proj_wkt,
dtype=np.uint16,
geot=grid.geot,
shape=(grid.height, grid.width, len(dates_ms)),
timestamps_ms=dates_ms,
nodataval=modis.MODIS_QA_NODATA
)
qa_header.save()
print 'Saved header in ', ndvi_grid_root, qa_grid_root
else:
ndvi_header = jgrid.Header.load(ndvi_grid_root)
qa_header = jgrid.Header.load(qa_grid_root)
assert np.all(ndvi_header.timestamps_ms == qa_header.timestamps_ms)
if args.dates_csv is not None:
# Verify that dates_csv match the header
dates = np.genfromtxt(args.dates_csv, dtype=str)
dates_ms = sorted([utils.parse_date(d) for d in dates])
assert np.all(ndvi_header.timestamps_ms == dates_ms)
assert args.frac_ndates == ndvi_header.frac_ndates,\
"Existing header has different frac_ndates (%d) than requested (%d)" % \
(ndvi_header.frac_ndates, args.frac_ndates)
hdf_files = collect_hdf_files(tilename, modis_dir)
# Verify that we have all necessary timestamps
header_timestamps = set(ndvi_header.timestamps_ms)
files_timestamps = set([t[1] for t in hdf_files])
difference = header_timestamps.difference(files_timestamps)
assert len(difference) == 0, \
'difference between available' \
' dates and required : %s' % \
' '.join([utils.format_date(d) for d in difference])
# only pick files for which the timestamp has been requested
hdf_files = filter(lambda f: f[1] in header_timestamps, hdf_files)
modgrid = grids.MODISGrid()
tile_h, tile_v = modis.parse_tilename(tilename)
fractions = modgrid.get_cells_for_tile(tile_h, tile_v)
if test_limit_fractions is not None:
        # This should only be used for testing as a means to speed things
# up by only creating a limited number of fractions
print 'TEST - Limiting fractions'
fractions = fractions[:test_limit_fractions]
grid_w = modgrid.MODIS_tile_width
grid_h = modgrid.MODIS_tile_height
max_frac_size_mb = (grid_w * grid_h * ndvi_header.frac_ndates * 2 / (1024. * 1024.))
print
print 'Will import the following :'
print 'tilename : %s' % tilename
print 'tile_h=%d, tile_v=%d' % (tile_h, tile_v)
print 'num fractions : %d' % len(fractions)
print 'input MODIS dir : %s' % modis_dir
print 'output NDVI grid root : %s' % ndvi_grid_root
print 'output QA grid root : %s' % qa_grid_root
print 'date range : %s' % (utils.format_date(hdf_files[0][1]) + ' - ' +
utils.format_date(hdf_files[-1][1]))
print 'num source hdf files : %d' % len(hdf_files)
    print 'required memory : %d [MB]' % max_frac_size_mb
print
if len(fractions) == 0:
print 'No fractions to process - terminating'
sys.exit(0)
if not args.noconfirm:
if not utils.confirm(prompt='Proceed?', resp=True):
sys.exit(-1)
_start = time.time()
assert ndvi_header.frac_ndates == qa_header.frac_ndates
for frac_d in xrange(ndvi_header.num_dates_fracs):
frac_time_range = np.arange(*ndvi_header.frac_time_range(frac_d))
frac_ndates = len(frac_time_range)
# We directly use short for data and let the workers do the conversion
shared_ndvi = multiprocessing.sharedctypes.RawArray(
ctypes.c_short, grid_w * grid_h * frac_ndates)
shared_qa = multiprocessing.sharedctypes.RawArray(
ctypes.c_short, grid_w * grid_h * frac_ndates)
pool = multiprocessing.Pool(
processes=nworkers,
initializer=_mp_init,
initargs=(shared_ndvi, shared_qa)
)
try:
            # The .get(9999999) calls are an ugly fix for a python bug where the keyboard
# interrupt isn't raised depending on when it happens
# see
# http://stackoverflow.com/a/1408476
# 1. Read data
_read_start = time.time()
args = []
for frac_ti, t in enumerate(frac_time_range):
fname, timestamp = hdf_files[t]
args.append((fname, frac_ti, grid_w, grid_h, frac_ndates))
pool.map_async(_mp_process_hdf, args).get(9999999)
print 'Read took %f [s]' % (time.time() - _read_start)
# 2. Write fractions
_write_start = time.time()
args = []
for frac_num in fractions:
frac_id = (frac_num, frac_d)
args.append((frac_id, grid_w, grid_h, frac_ndates))
pool.map_async(_mp_write_frac, args).get(9999999)
print 'Write took %f [s]' % (time.time() - _write_start)
pool.close()
pool.join()
except KeyboardInterrupt:
print "Caught KeyboardInterrupt, terminating workers"
pool.terminate()
pool.join()
sys.exit(-1)
print 'Took %f [s]' % (time.time() - _start)
|
|
'''
XbrlPublicPostgresDB.py implements a relational database interface for Arelle, based
on the XBRL US Public Database. The database schema is described by "XBRL US Public Database",
published by XBRL US, 2011. This is a syntactic representation of XBRL information.
This module provides the execution context for saving a dts and assession in
XBRL Public Database Tables. It may be loaded by Arelle'sRSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
and does not apply to the XBRL US Database schema and description.
The XBRL US Database schema and description is (c) Copyright XBRL US 2011, The
resulting database may contain data from SEC interactive data filings (or any other XBRL
instance documents and DTS) in a relational model. Mark V Systems conveys neither
rights nor license for the database schema.
The XBRL US Database and this code are intended for Postgres. XBRL-US uses Postgres 8.4,
Arelle uses 9.1, via Python DB API 2.0 interface, using the pg8000 library.
Information for the 'official' XBRL US-maintained database (this schema, containing SEC filings):
Database Name: edgar_db
Database engine: Postgres version 8.4
    Host: public.xbrl.us
Port: 5432
to use from command line:
linux
# be sure plugin is installed
arelleCmdLine --plugin '+xbrlDB|show'
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB 'myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds'
windows
rem be sure plugin is installed
arelleCmdLine --plugin "+xbrlDB|show"
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB "myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds"
'''
import time, datetime
from arelle.ModelDocument import Type
from arelle.ModelDtsObject import ModelConcept, ModelResource
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.ValidateXbrlCalcs import roundValue
from arelle.XmlUtil import elementFragmentIdentifier
from arelle import XbrlConst
from .SqlDb import XPDBException, isSqlConnection, SqlDbConnection
def insertIntoDB(modelXbrl,
user=None, password=None, host=None, port=None, database=None, timeout=None,
product="postgres", rssItem=None, **kwargs):
xpgdb = None
try:
xpgdb = XbrlPostgresDatabaseConnection(modelXbrl, user, password, host, port, database, timeout, product)
xpgdb.verifyTables()
xpgdb.insertXbrl(rssItem=rssItem)
xpgdb.close()
except Exception as ex:
if xpgdb is not None:
try:
xpgdb.close(rollback=True)
except Exception as ex2:
pass
raise # reraise original exception with original traceback
def isDBPort(host, port, timeout=10):
return isSqlConnection(host, port, timeout, product="postgres")
XBRLDBTABLES = {
"fact", "fact_aug",
"entity",
"entity_name_history",
"unit", "unit_measure",
"context", "context_aug", "context_dimension",
"accession", "accession_document_association", "accession_element", "accession_timestamp",
"attribute_value",
"custom_role_type",
"uri",
"document",
"qname",
"taxonomy", "taxonomy_version", "namespace",
"element", "element_attribute", "element_attribute_value_association",
"network", "relationship",
"custom_arcrole_type", "custom_arcrole_used_on", "custom_role_used_on",
"label_resource",
"reference_part", "reference_part_type", "reference_resource",
"resource",
"enumeration_arcrole_cycles_allowed",
"enumeration_element_balance",
"enumeration_element_period_type",
"enumeration_unit_measure_location",
"industry", "industry_level",
"industry_structure",
"query_log",
"sic_code",
}
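# Illustrative sketch, not used by the module: verifyTables below relies on plain
# set difference over XBRLDBTABLES -- an empty database (everything missing) gets
# the DDL run, a partially initialized one is reported as needing reinitialization.
def _schemaStateExample(tablesInDb):
    missing = XBRLDBTABLES - set(tablesInDb)
    if missing == XBRLDBTABLES:
        return "empty database: create schema from DDL"
    elif missing:
        return "partially initialized: reinitialize schema"
    return "schema complete"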
class XbrlPostgresDatabaseConnection(SqlDbConnection):
def verifyTables(self):
missingTables = XBRLDBTABLES - self.tablesInDB()
# if no tables, initialize database
if missingTables == XBRLDBTABLES:
self.create("xbrlPublicPostgresDB.ddl")
# load fixed tables
self.getTable('enumeration_arcrole_cycles_allowed', 'enumeration_arcrole_cycles_allowed_id',
('description',), ('description',),
(('any',), ('undirected',), ('none',)))
self.getTable('enumeration_element_balance', 'enumeration_element_balance_id',
('description',), ('description',),
(('credit',), ('debit',)))
self.getTable('enumeration_element_period_type', 'enumeration_element_period_type_id',
('description',), ('description',),
(('instant',), ('duration',), ('forever',)))
missingTables = XBRLDBTABLES - self.tablesInDB()
if missingTables:
raise XPDBException("xpgDB:MissingTables",
_("The following tables are missing, suggest reinitializing database schema: %(missingTableNames)s"),
missingTableNames=', '.join(t for t in sorted(missingTables)))
def insertXbrl(self, rssItem):
try:
# must also have default dimensions loaded
from arelle import ValidateXbrlDimensions
ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
# find pre-existing documents in server database
self.identifyPreexistingDocuments()
self.identifyConceptsUsed()
startedAt = time.time()
self.insertAccession(rssItem)
self.insertUris()
self.insertQnames()
self.insertNamespaces()
self.insertDocuments()
self.insertCustomArcroles()
self.insertCustomRoles()
self.insertElements()
self.insertResources()
self.insertNetworks()
self.modelXbrl.profileStat(_("XbrlPublicDB: DTS insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertFacts()
self.modelXbrl.profileStat(_("XbrlPublicDB: instance insertion"), time.time() - startedAt)
startedAt = time.time()
self.showStatus("Committing entries")
self.commit()
self.modelXbrl.profileStat(_("XbrlPublicDB: insertion committed"), time.time() - startedAt)
self.showStatus("DB insertion completed", clearAfter=5000)
except Exception as ex:
self.showStatus("DB insertion failed due to exception", clearAfter=5000)
raise
def insertAccession(self, rssItem):
self.accessionId = "(TBD)"
self.showStatus("insert accession")
if rssItem is None:
_time = time.time()
now = datetime.datetime.fromtimestamp(_time)
today = datetime.date(now.year, now.month, now.day)
table = self.getTable('accession', 'accession_id',
('filing_date','entity_id','creation_software',
'entry_url', 'filing_accession_number'),
('filing_accession_number',),
((today, # NOT NULL
0, # NOT NULL
self.modelXbrl.modelDocument.creationSoftwareComment,
self.modelXbrl.uri,
str(int(_time)) # NOT NULL
),),
checkIfExisting=True,
returnExistenceStatus=True)
else:
table = self.getTable('accession', 'accession_id',
('accepted_timestamp', 'is_most_current', 'filing_date','entity_id',
'entity_name', 'creation_software', 'standard_industrial_classification',
'sec_html_url', 'entry_url', 'filing_accession_number'),
('filing_accession_number',),
((rssItem.acceptanceDatetime,
True,
rssItem.filingDate or datetime.datetime.min, # NOT NULL
rssItem.cikNumber or 0, # NOT NULL
rssItem.companyName,
self.modelXbrl.modelDocument.creationSoftwareComment,
rssItem.assignedSic or -1, # NOT NULL
rssItem.htmlUrl,
rssItem.url,
rssItem.accessionNumber or 'UNKNOWN' # NOT NULL
),),
checkIfExisting=True,
returnExistenceStatus=True)
for id, filing_accession_number, existenceStatus in table:
self.accessionId = id
self.accessionPreviouslyInDB = existenceStatus
break
def insertUris(self):
uris = (_DICT_SET(self.modelXbrl.namespaceDocs.keys()) |
_DICT_SET(self.modelXbrl.arcroleTypes.keys()) |
_DICT_SET(XbrlConst.standardArcroleCyclesAllowed.keys()) |
_DICT_SET(self.modelXbrl.roleTypes.keys()) |
XbrlConst.standardRoles)
self.showStatus("insert uris")
table = self.getTable('uri', 'uri_id',
('uri',),
('uri',), # indexed match cols
tuple((uri,)
for uri in uris),
checkIfExisting=True)
self.uriId = dict((uri, id)
for id, uri in table)
def insertQnames(self):
qnames = (_DICT_SET(self.modelXbrl.qnameConcepts.keys()) |
_DICT_SET(self.modelXbrl.qnameAttributes.keys()) |
_DICT_SET(self.modelXbrl.qnameTypes.keys()) |
set(measure
for unit in self.modelXbrl.units.values()
for measures in unit.measures
for measure in measures))
self.showStatus("insert qnames")
table = self.getTable('qname', 'qname_id',
('namespace', 'local_name'),
('namespace', 'local_name'), # indexed match cols
tuple((qn.namespaceURI, qn.localName)
for qn in qnames),
checkIfExisting=True)
self.qnameId = dict((qname(ns, ln), id)
for id, ns, ln in table)
def insertNamespaces(self):
self.showStatus("insert namespaces")
if self.disclosureSystem.baseTaxonomyNamespaces:
# use only base taxonomy namespaces in disclosure system
namespaceUris = self.disclosureSystem.baseTaxonomyNamespaces
else:
# use all namespace URIs
namespaceUris = self.modelXbrl.namespaceDocs.keys()
table = self.getTable('namespace', 'namespace_id',
('uri', 'is_base', 'taxonomy_version_id', 'prefix'),
('uri',), # indexed matchcol
tuple((uri, True, 0, self.disclosureSystem.standardPrefixes.get(uri,None))
for uri in namespaceUris),
checkIfExisting=True)
self.namespaceId = dict((uri, id)
for id, uri in table)
def identifyPreexistingDocuments(self):
self.existingDocumentIds = {}
docUris = set()
for modelDocument in self.modelXbrl.urlDocs.values():
if modelDocument.type == Type.SCHEMA:
docUris.add(self.dbStr(modelDocument.uri))
if docUris:
results = self.execute("SELECT document_id, document_uri FROM document WHERE document_uri IN (" +
', '.join(docUris) + ");")
self.existingDocumentIds = dict((docUri,docId) for docId, docUri in results)
def identifyConceptsUsed(self):
# relationshipSets are a dts property
self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
conceptsUsed = set(f.qname for f in self.modelXbrl.factsInInstance)
for cntx in self.modelXbrl.contexts.values():
for dim in cntx.qnameDims.values():
conceptsUsed.add(dim.dimensionQname)
if dim.isExplicit:
conceptsUsed.add(dim.memberQname)
else:
conceptsUsed.add(dim.typedMember.qname)
for defaultDim, defaultDimMember in self.modelXbrl.qnameDimensionDefaults.items():
conceptsUsed.add(defaultDim)
conceptsUsed.add(defaultDimMember)
for relationshipSetKey in self.relationshipSets:
relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
for rel in relationshipSet.modelRelationships:
if isinstance(rel.fromModelObject, ModelConcept):
conceptsUsed.add(rel.fromModelObject)
if isinstance(rel.toModelObject, ModelConcept):
conceptsUsed.add(rel.toModelObject)
for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
conceptsUsed.add(self.modelXbrl.qnameConcepts[qn])
conceptsUsed -= {None} # remove None if in conceptsUsed
self.conceptsUsed = conceptsUsed
def insertDocuments(self):
self.showStatus("insert documents")
table = self.getTable('document', 'document_id',
('document_uri',),
('document_uri',),
set((docUri,)
for docUri in self.modelXbrl.urlDocs.keys()
if docUri not in self.existingDocumentIds),
checkIfExisting=True)
self.documentIds = dict((uri, id)
for id, uri in table)
self.documentIds.update(self.existingDocumentIds)
table = self.getTable('accession_document_association', 'accession_document_association_id',
('accession_id','document_id'),
('document_id',),
tuple((self.accessionId, docId)
for docId in self.documentIds.values()),
checkIfExisting=True)
def insertCustomArcroles(self):
self.showStatus("insert arcrole types")
arcroleTypesByIds = dict(((self.documentIds[arcroleType.modelDocument.uri],
self.uriId[arcroleType.arcroleURI]), # key on docId, uriId
arcroleType) # value is roleType object
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument.uri not in self.existingDocumentIds)
table = self.getTable('custom_arcrole_type', 'custom_arcrole_type_id',
('document_id', 'uri_id', 'definition', 'cycles_allowed'),
('document_id', 'uri_id'),
tuple((arcroleTypeIDs[0], # doc Id
arcroleTypeIDs[1], # uri Id
arcroleType.definition,
{'any':1, 'undirected':2, 'none':3}[arcroleType.cyclesAllowed])
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()))
table = self.getTable('custom_arcrole_used_on', 'custom_arcrole_used_on_id',
('custom_arcrole_type_id', 'qname_id'),
('custom_arcrole_type_id', 'qname_id'),
tuple((id, self.qnameId[usedOn])
for id, docid, uriid in table
for usedOn in arcroleTypesByIds[(docid,uriid)].usedOns))
def insertCustomRoles(self):
self.showStatus("insert role types")
roleTypesByIds = dict(((self.documentIds[roleType.modelDocument.uri],
self.uriId[roleType.roleURI]), # key on docId, uriId
roleType) # value is roleType object
for roleTypes in self.modelXbrl.roleTypes.values()
for roleType in roleTypes
if roleType.modelDocument.uri not in self.existingDocumentIds)
table = self.getTable('custom_role_type', 'custom_role_type_id',
('document_id', 'uri_id', 'definition'),
('document_id', 'uri_id'),
tuple((roleTypeIDs[0], # doc Id
roleTypeIDs[1], # uri Id
roleType.definition)
for roleTypeIDs, roleType in roleTypesByIds.items()))
table = self.getTable('custom_role_used_on', 'custom_role_used_on_id',
('custom_role_type_id', 'qname_id'),
('custom_role_type_id', 'qname_id'),
tuple((id, self.qnameId[usedOn])
for id, docid, uriid in table
for usedOn in roleTypesByIds[(docid,uriid)].usedOns))
def insertElements(self):
self.showStatus("insert elements")
filingDocumentConcepts = set()
existingDocumentUsedConcepts = set()
for concept in self.modelXbrl.qnameConcepts.values():
if concept.modelDocument.uri not in self.existingDocumentIds:
filingDocumentConcepts.add(concept)
elif concept in self.conceptsUsed:
existingDocumentUsedConcepts.add(concept)
table = self.getTable('element', 'element_id',
('qname_id', 'datatype_qname_id', 'xbrl_base_datatype_qname_id', 'balance_id',
'period_type_id', 'substitution_group_qname_id', 'abstract', 'nillable',
'document_id', 'is_numeric', 'is_monetary'),
('qname_id',),
tuple((self.qnameId[concept.qname],
self.qnameId.get(concept.typeQname), # may be None
self.qnameId.get(concept.baseXbrliTypeQname
if not isinstance(concept.baseXbrliTypeQname, list)
else concept.baseXbrliTypeQname[0]
), # may be None or may be a list for a union
{'debit':1, 'credit':2, None:None}[concept.balance],
{'instant':1, 'duration':2, 'forever':3, None:0}[concept.periodType],
self.qnameId.get(concept.substitutionGroupQname), # may be None
concept.isAbstract,
concept.isNillable,
self.documentIds[concept.modelDocument.uri],
concept.isNumeric,
concept.isMonetary)
for concept in filingDocumentConcepts)
)
self.elementId = dict((qnameId, elementId) # indexed by qnameId, not by qname value
for elementId, qnameId in table)
filingDocumentConcepts.clear() # dereference
# get existing element IDs
if existingDocumentUsedConcepts:
conceptQnameIds = []
for concept in existingDocumentUsedConcepts:
conceptQnameIds.append(str(self.qnameId[concept.qname]))
results = self.execute("SELECT element_id, qname_id FROM element WHERE qname_id IN (" +
', '.join(conceptQnameIds) + ");")
for elementId, qnameId in results:
self.elementId[qnameId] = elementId
existingDocumentUsedConcepts.clear() # dereference
def conceptElementId(self, concept):
if isinstance(concept, ModelConcept):
return self.elementId.get(self.qnameId.get(concept.qname))
else:
return None
def insertResources(self):
self.showStatus("insert resources")
# deduplicate resources (may be on multiple arcs)
# note that lxml has no column numbers, use objectIndex as pseudo-column number
uniqueResources = dict(((self.documentIds[resource.modelDocument.uri],
resource.sourceline,
resource.objectIndex), resource)
for arcrole in (XbrlConst.conceptLabel, XbrlConst.conceptReference,
XbrlConst.elementLabel, XbrlConst.elementReference)
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
for resource in (rel.fromModelObject, rel.toModelObject)
if isinstance(resource, ModelResource))
resourceData = tuple((self.uriId[resource.role],
self.qnameId[resource.qname],
self.documentIds[resource.modelDocument.uri],
resource.sourceline,
resource.objectIndex)
for resource in uniqueResources.values())
uniqueResources.clear() # dereference before getTable
table = self.getTable('resource', 'resource_id',
('role_uri_id', 'qname_id', 'document_id', 'document_line_number', 'document_column_number'),
('document_id', 'document_line_number', 'document_column_number'),
resourceData,
checkIfExisting=True)
self.resourceId = dict(((docId, line, offset), id)
for id, docId, line, offset in table)
self.showStatus("insert labels")
uniqueResources = dict(((self.resourceId[self.documentIds[resource.modelDocument.uri],
resource.sourceline,
resource.objectIndex]), resource)
for arcrole in (XbrlConst.conceptLabel, XbrlConst.elementLabel)
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
for resource in (rel.fromModelObject, rel.toModelObject)
if isinstance(resource, ModelResource))
table = self.getTable('label_resource', 'resource_id',
('resource_id', 'label', 'xml_lang'),
('resource_id',),
tuple((resourceId,
resource.textValue,
resource.xmlLang)
for resourceId, resource in uniqueResources.items()),
checkIfExisting=True)
uniqueResources.clear()
def insertNetworks(self):
self.showStatus("insert networks")
table = self.getTable('network', 'network_id',
('accession_id', 'extended_link_qname_id', 'extended_link_role_uri_id',
'arc_qname_id', 'arcrole_uri_id', 'description'),
('accession_id', 'extended_link_qname_id', 'extended_link_role_uri_id',
'arc_qname_id', 'arcrole_uri_id'),
tuple((self.accessionId,
self.qnameId[linkqname],
self.uriId[ELR],
self.qnameId[arcqname],
self.uriId[arcrole],
None if ELR in XbrlConst.standardRoles else
self.modelXbrl.roleTypes[ELR][0].definition)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")))
self.networkId = dict(((accId, linkQnId, linkRoleId, arcQnId, arcRoleId), id)
for id, accId, linkQnId, linkRoleId, arcQnId, arcRoleId in table)
# do tree walk to build relationships with depth annotated, no targetRole navigation
dbRels = []
def walkTree(rels, seq, depth, relationshipSet, visited, dbRels, networkId):
for rel in rels:
if rel not in visited and isinstance(rel.toModelObject, ModelObject):
visited.add(rel)
dbRels.append((rel, seq, depth, networkId))
seq += 1
seq = walkTree(relationshipSet.fromModelObject(rel.toModelObject), seq, depth+1, relationshipSet, visited, dbRels, networkId)
visited.remove(rel)
return seq
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-"):
networkId = self.networkId[(self.accessionId,
self.qnameId[linkqname],
self.uriId[ELR],
self.qnameId[arcqname],
self.uriId[arcrole])]
relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
seq = 1
for rootConcept in relationshipSet.rootConcepts:
seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), dbRels, networkId)
def resourceResourceId(resource):
if isinstance(resource, ModelResource):
return self.resourceId.get((self.documentIds[resource.modelDocument.uri],
resource.sourceline,
resource.objectIndex))
else:
return None
relsData = tuple((networkId,
self.conceptElementId(rel.fromModelObject), # may be None
self.conceptElementId(rel.toModelObject), # may be None
self.dbNum(rel.order),
resourceResourceId(rel.fromModelObject), # may be None
resourceResourceId(rel.toModelObject), # may be None
self.dbNum(rel.weight), # none if no weight
sequence,
depth,
self.qnameId.get(rel.preferredLabel) if rel.preferredLabel else None)
for rel, sequence, depth, networkId in dbRels
if isinstance(rel.fromModelObject, ModelConcept) and isinstance(rel.toModelObject, ModelConcept))
del dbRels[:] # dereference
table = self.getTable('relationship', 'relationship_id',
('network_id', 'from_element_id', 'to_element_id', 'reln_order',
'from_resource_id', 'to_resource_id', 'calculation_weight',
'tree_sequence', 'tree_depth', 'preferred_label_role_uri_id'),
('network_id', 'tree_sequence'),
relsData)
def insertFacts(self):
accsId = self.accessionId
if self.accessionPreviouslyInDB:
self.showStatus("deleting prior facts of this accession")
# remove prior facts
self.execute("DELETE FROM fact WHERE fact.accession_id = {};".format(accsId),
close=False, fetch=False)
self.execute("DELETE from unit_measure "
"USING unit "
"WHERE unit.accession_id = {0} AND unit_measure.unit_id = unit.unit_id;".format(accsId),
close=False, fetch=False)
self.execute("DELETE from unit WHERE unit.accession_id = {0};".format(accsId),
close=False, fetch=False)
self.execute("DELETE from context WHERE context.accession_id = {0};".format(accsId),
close=False, fetch=False)
self.showStatus("insert facts")
# units
table = self.getTable('unit', 'unit_id',
('accession_id', 'unit_xml_id'),
('accession_id', 'unit_xml_id'),
tuple((accsId,
unitId)
for unitId in self.modelXbrl.units.keys()))
self.unitId = dict(((_accsId, xmlId), id)
for id, _accsId, xmlId in table)
# measures
table = self.getTable('unit_measure', 'unit_measure_id',
('unit_id', 'qname_id', 'location_id'),
('qname_id', 'location_id'),
tuple((self.unitId[(accsId,unit.id)],
self.qnameId[measure],
1 if (not unit.measures[1]) else (i + 1))
for unit in self.modelXbrl.units.values()
for i in range(2)
for measure in unit.measures[i]))
#table = self.getTable('enumeration_measure_location', 'enumeration_measure_location_id',
# ('description',),
# ('description',),
# (('measure',), ('numerator',), ('denominator',)))
# context
table = self.getTable('context', 'context_id',
('accession_id', 'period_start', 'period_end', 'period_instant', 'specifies_dimensions', 'context_xml_id', 'entity_scheme', 'entity_identifier'),
('accession_id', 'context_xml_id'),
tuple((accsId,
cntx.startDatetime if cntx.isStartEndPeriod else None,
cntx.endDatetime if cntx.isStartEndPeriod else None,
cntx.instantDatetime if cntx.isInstantPeriod else None,
bool(cntx.qnameDims),
cntx.id,
cntx.entityIdentifier[0],
cntx.entityIdentifier[1])
for cntx in self.modelXbrl.contexts.values()))
self.cntxId = dict(((_accsId, xmlId), id)
for id, _accsId, xmlId in table)
# context_dimension
values = []
for cntx in self.modelXbrl.contexts.values():
for dim in cntx.qnameDims.values():
values.append((self.cntxId[(accsId,cntx.id)],
self.qnameId[dim.dimensionQname],
self.qnameId.get(dim.memberQname), # may be None
self.qnameId.get(dim.typedMember.qname) if dim.isTyped else None,
False, # not default
dim.contextElement == "segment",
dim.typedMember.stringValue if dim.isTyped else None))
for dimQname, memQname in self.modelXbrl.qnameDimensionDefaults.items():
if dimQname not in cntx.qnameDims:
values.append((self.cntxId[(accsId,cntx.id)],
self.qnameId[dimQname],
self.qnameId[memQname],
None,
True, # is default
True, # ambiguous and irrelevant for the XDT model
None))
table = self.getTable('context_dimension', 'context_dimension_id',
('context_id', 'dimension_qname_id', 'member_qname_id', 'typed_qname_id', 'is_default', 'is_segment', 'typed_text_content'),
('context_id', 'dimension_qname_id', 'member_qname_id'), # typed_qname_id deliberately omitted from match columns; it is not indexed in the XBRL-US DDL
values)
# facts
def insertFactSet(modelFacts, tupleFactId):
table = self.getTable('fact', 'fact_id',
('accession_id', 'tuple_fact_id', 'context_id', 'unit_id', 'element_id', 'effective_value', 'fact_value',
'xml_id', 'precision_value', 'decimals_value',
'is_precision_infinity', 'is_decimals_infinity', ),
('accession_id', 'xml_id'),
tuple((accsId,
tupleFactId,
self.cntxId.get((accsId,fact.contextID)),
self.unitId.get((accsId,fact.unitID)),
self.conceptElementId(fact.concept),
roundValue(fact.value, fact.precision, fact.decimals) if fact.isNumeric and not fact.isNil else None,
fact.value,
elementFragmentIdentifier(fact),
fact.xAttributes['precision'].xValue if ('precision' in fact.xAttributes and isinstance(fact.xAttributes['precision'].xValue,int)) else None,
fact.xAttributes['decimals'].xValue if ('decimals' in fact.xAttributes and isinstance(fact.xAttributes['decimals'].xValue,int)) else None,
'precision' in fact.xAttributes and fact.xAttributes['precision'].xValue == 'INF',
'decimals' in fact.xAttributes and fact.xAttributes['decimals'].xValue == 'INF',
)
for fact in modelFacts))
factId = dict((xmlId, id)
for id, _accsId, xmlId in table)
for fact in modelFacts:
if fact.isTuple:
insertFactSet(fact.modelTupleFacts,
factId[elementFragmentIdentifier(fact)])
insertFactSet(self.modelXbrl.facts, None)
# hashes
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from rally.common.i18n import _
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class RallyException(Exception):
"""Base Rally Exception
To correctly use this class, inherit from it and define
a "msg_fmt" property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("%(message)s")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if "%(message)s" in self.msg_fmt:
kwargs.update({"message": message})
super(RallyException, self).__init__(self.msg_fmt % kwargs)
def format_message(self):
return six.text_type(self)
class ImmutableException(RallyException):
msg_fmt = _("This object is immutable.")
class InvalidArgumentsException(RallyException):
msg_fmt = _("Invalid arguments: '%(message)s'")
class InvalidConfigException(RallyException):
msg_fmt = _("This config has invalid schema: `%(message)s`")
class InvalidRunnerResult(RallyException):
msg_fmt = _("Type of result of `%(name)s` runner should be"
" `base.ScenarioRunnerResult`. Got: `%(results_type)s`")
class InvalidTaskException(InvalidConfigException):
msg_fmt = _("Task config is invalid: `%(message)s`")
class NotFoundScenarios(InvalidTaskException):
msg_fmt = _("There are no benchmark scenarios with names: `%(names)s`.")
class InvalidBenchmarkConfig(InvalidTaskException):
msg_fmt = _("Input task is invalid!\n\n"
"Benchmark %(name)s[%(pos)s] has wrong configuration"
"\nBenchmark configuration:\n%(config)s\n"
"\nReason:\n %(reason)s")
class NotFoundException(RallyException):
msg_fmt = _("The resource can not be found.")
class PluginNotFound(NotFoundException):
msg_fmt = _("There is no plugin with name: %(name)s in "
"%(namespace)s namespace.")
class PluginWithSuchNameExists(RallyException):
msg_fmt = _("Plugin with such name: %(name)s already exists in "
"%(namespace)s namespace")
class NoSuchConfigField(NotFoundException):
msg_fmt = _("There is no field in the task config with name `%(name)s`.")
class NoSuchRole(NotFoundException):
msg_fmt = _("There is no role with name `%(role)s`.")
class TaskNotFound(NotFoundException):
msg_fmt = _("Task with uuid=%(uuid)s not found.")
class DeploymentNotFound(NotFoundException):
msg_fmt = _("Deployment %(deployment)s not found.")
class DeploymentNameExists(RallyException):
msg_fmt = _("Deployment name '%(deployment)s' already registered.")
class DeploymentIsBusy(RallyException):
msg_fmt = _("There are allocated resources for the deployment with "
"uuid=%(uuid)s.")
class RallyAssertionError(RallyException):
msg_fmt = _("Assertion error: %(message)s")
class ResourceNotFound(NotFoundException):
msg_fmt = _("Resource with id=%(id)s not found.")
class TimeoutException(RallyException):
msg_fmt = _("Rally tired waiting for %(resource_type)s %(resource_name)s:"
"%(resource_id)s to become %(desired_status)s current "
"status %(resource_status)s")
class GetResourceFailure(RallyException):
msg_fmt = _("Failed to get the resource %(resource)s: %(err)s")
class GetResourceNotFound(GetResourceFailure):
msg_fmt = _("Resource %(resource)s is not found.")
class GetResourceErrorStatus(GetResourceFailure):
msg_fmt = _("Resource %(resource)s has %(status)s status.\n"
"Fault: %(fault)s")
class ScriptError(RallyException):
msg_fmt = _("Script execution failed: %(message)s")
class TaskInvalidStatus(RallyException):
msg_fmt = _("Task `%(uuid)s` in `%(actual)s` status but `%(require)s` is "
"required.")
class ChecksumMismatch(RallyException):
msg_fmt = _("Checksum mismatch for image: %(url)s")
class InvalidAdminException(InvalidArgumentsException):
msg_fmt = _("user %(username)s doesn't have 'admin' role")
class InvalidEndpointsException(InvalidArgumentsException):
msg_fmt = _("wrong keystone credentials specified in your endpoint"
" properties. (HTTP 401)")
class HostUnreachableException(InvalidArgumentsException):
msg_fmt = _("unable to establish connection to the remote host: %(url)s")
class InvalidScenarioArgument(RallyException):
msg_fmt = _("Invalid scenario argument: '%(message)s'")
class BenchmarkSetupFailure(RallyException):
msg_fmt = _("Unable to setup benchmark: '%(message)s'")
class ContextSetupFailure(RallyException):
msg_fmt = _("Unable to setup context '%(ctx_name)s': '%(msg)s'")
class ValidationError(RallyException):
msg_fmt = _("Validation error: %(message)s")
class NoNodesFound(RallyException):
msg_fmt = _("There is no nodes matching filters: %(filters)r")
class UnknownRelease(RallyException):
msg_fmt = _("Unknown release '%(release)s'")
class CleanUpException(RallyException):
msg_fmt = _("Cleanup failed.")
class ImageCleanUpException(CleanUpException):
msg_fmt = _("Image Deletion Failed")
class IncompatiblePythonVersion(RallyException):
msg_fmt = _("Incompatible python version found '%(version)s', "
"required '%(required_version)s'")
class WorkerNotFound(NotFoundException):
msg_fmt = _("Worker %(worker)s could not be found")
class WorkerAlreadyRegistered(RallyException):
msg_fmt = _("Worker %(worker)s already registered")
class SaharaClusterFailure(RallyException):
msg_fmt = _("Sahara cluster %(name)s has failed to %(action)s. "
"Reason: '%(reason)s'")
class LiveMigrateException(RallyException):
msg_fmt = _("Live Migration failed: %(message)s")
class MigrateException(RallyException):
msg_fmt = _("Migration failed: %(message)s")
class InvalidHostException(RallyException):
msg_fmt = _("Live Migration failed: %(message)s")
class MultipleMatchesFound(RallyException):
msg_fmt = _("Found multiple %(needle)s: %(haystack)s")
class TempestConfigCreationFailure(RallyException):
msg_fmt = _("Unable to create Tempest config file: %(message)s")
class TempestResourceCreationFailure(RallyException):
msg_fmt = _("Unable to create resource needed for Tempest: %(message)s")
|
|
from __future__ import print_function, division
from sympy.tensor.indexed import Idx
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
from sympy.functions.elementary.exponential import exp, log
from sympy.polys import quo, roots
from sympy.simplify import powsimp
from sympy.core.compatibility import range
class Product(ExprWithIntLimits):
r"""Represents unevaluated products.
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, factorial, oo
>>> Product(k, (k, 1, m))
Product(k, (k, 1, m))
>>> Product(k, (k, 1, m)).doit()
factorial(m)
>>> Product(k**2,(k, 1, m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k, 1, m)).doit()
(factorial(m))**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
2**(-2*n)*4**n*(factorial(n))**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi**2/2):
>>> from sympy import pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*(factorial(n))**2)
>>> Pe = Pe.rewrite(gamma)
>>> Pe
pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 + pi/2)*gamma(-pi/2 + 1)*gamma(n + 1)**2)
>>> Pe = simplify(Pe)
>>> Pe
sin(pi**2/2)*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/gamma(n + 1)**2
>>> limit(Pe, n, oo)
sin(pi**2/2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> simplify(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] http://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def _eval_rewrite_as_Sum(self, *args):
from sympy.concrete.summations import Sum
return exp(Sum(log(self.function), *self.limits))
@property
def term(self):
return self._args[0]
function = term
def _eval_is_zero(self):
# a Product is zero only if its term is zero.
return self.term.is_zero
def doit(self, **hints):
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_Integer and dif < 0:
a, b = b + 1, a - 1
f = 1 / f
if isinstance(i, Idx):
i = i.label
g = self._eval_product(f, (i, a, b))
if g in (None, S.NaN):
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
from sympy.concrete.delta import deltaproduct, _has_simple_delta
from sympy.concrete.summations import summation
from sympy.functions import KroneckerDelta, RisingFactorial
(k, a, n) = limits
if k not in term.free_symbols:
if (term - 1).is_zero:
return S.One
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
if dif.is_Integer:
return Mul(*[term.subs(k, a + i) for i in range(dif + 1)])
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly)
M = 0
for r, m in all_roots.items():
M += m
A *= RisingFactorial(a - r, n - a + 1)**m
Q *= (n - r)**m
if M < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p, (k, a, n))
q = self._eval_product(q, (k, a, n))
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return A * B
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import product_simplify
return product_simplify(self)
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def reverse_order(expr, *indices):
"""
Reverse the order of a limit in a Product.
Usage
=====
``reverse_order(expr, *indices)`` reverses some limits in the expression
``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import Product, simplify, RisingFactorial, gamma, Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> P = Product(x, (x, a, b))
>>> Pr = P.reverse_order(x)
>>> Pr
Product(1/x, (x, b + 1, a - 1))
>>> Pr = Pr.doit()
>>> Pr
1/RisingFactorial(b + 1, a - b - 1)
>>> simplify(Pr)
gamma(b + 1)/gamma(a)
>>> P = P.doit()
>>> P
RisingFactorial(a, -a + b + 1)
>>> simplify(P)
gamma(b + 1)/gamma(a)
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x*y, (x, a, b), (y, c, d))
>>> S
Sum(x*y, (x, a, b), (y, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
index, reorder_limit, reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = expr.index(indx)
e = 1
limits = []
for i, limit in enumerate(expr.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Product(expr.function ** e, *limits)
def product(*args, **kwargs):
r"""
Compute the product.
The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
                                 b
                               _____
    product(f(n), (i, a, b)) = |   | f(n)
                               |   |
                               i = a
If it cannot compute the product, it returns an unevaluated Product object.
Repeated products can be computed by introducing additional symbols tuples::
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
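# A hedged demonstration, using only behaviour documented above: under the Karr
# convention a product and its limit-reversed counterpart cancel exactly.
def _karr_convention_example():
    from sympy import simplify
    from sympy.abc import a, b, i
    P1 = Product(i, (i, a, b)).doit()           # RisingFactorial(a, -a + b + 1)
    P2 = Product(i, (i, b + 1, a - 1)).doit()   # reversed limits
    return simplify(P1 * P2)                    # 1 for any a, b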
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SchemaRegistryOperations:
"""SchemaRegistryOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.eventhub.v2021_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_namespace(
self,
resource_group_name: str,
namespace_name: str,
skip: Optional[int] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.SchemaGroupListResult"]:
"""Gets all the Schema Groups in a Namespace.
:param resource_group_name: Name of the resource group within the azure subscription.
:type resource_group_name: str
:param namespace_name: The Namespace name.
:type namespace_name: str
:param skip: Skip is only used if a previous operation returned a partial result. If a previous
response contains a nextLink element, the value of the nextLink element will include a skip
parameter that specifies a starting point to use for subsequent calls.
:type skip: int
:param top: May be used to limit the number of results to the most recent N usageDetails.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SchemaGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.eventhub.v2021_11_01.models.SchemaGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SchemaGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_namespace.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', maximum=1000, minimum=0)
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=1000, minimum=1)
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SchemaGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_namespace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
namespace_name: str,
schema_group_name: str,
parameters: "_models.SchemaGroup",
**kwargs: Any
) -> "_models.SchemaGroup":
"""create_or_update.
:param resource_group_name: Name of the resource group within the azure subscription.
:type resource_group_name: str
:param namespace_name: The Namespace name.
:type namespace_name: str
:param schema_group_name: The Schema Group name.
:type schema_group_name: str
:param parameters: Parameters supplied to create an Event Hub resource.
:type parameters: ~azure.mgmt.eventhub.v2021_11_01.models.SchemaGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SchemaGroup, or the result of cls(response)
:rtype: ~azure.mgmt.eventhub.v2021_11_01.models.SchemaGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SchemaGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'schemaGroupName': self._serialize.url("schema_group_name", schema_group_name, 'str', max_length=256, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SchemaGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SchemaGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
namespace_name: str,
schema_group_name: str,
**kwargs: Any
) -> None:
"""delete.
:param resource_group_name: Name of the resource group within the azure subscription.
:type resource_group_name: str
:param namespace_name: The Namespace name.
:type namespace_name: str
:param schema_group_name: The Schema Group name.
:type schema_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'schemaGroupName': self._serialize.url("schema_group_name", schema_group_name, 'str', max_length=256, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}'} # type: ignore
async def get(
self,
resource_group_name: str,
namespace_name: str,
schema_group_name: str,
**kwargs: Any
) -> "_models.SchemaGroup":
"""get.
:param resource_group_name: Name of the resource group within the azure subscription.
:type resource_group_name: str
:param namespace_name: The Namespace name.
:type namespace_name: str
:param schema_group_name: The Schema Group name.
:type schema_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SchemaGroup, or the result of cls(response)
:rtype: ~azure.mgmt.eventhub.v2021_11_01.models.SchemaGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SchemaGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'schemaGroupName': self._serialize.url("schema_group_name", schema_group_name, 'str', max_length=256, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SchemaGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}'} # type: ignore
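# Hedged usage sketch, not part of the generated code: iterate schema groups with
# the async paging operation above.  The credential, subscription id and resource
# names are placeholders, and the operations group is assumed to be exposed on the
# management client as the `schema_registry` attribute.
async def _schema_registry_example():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.eventhub.aio import EventHubManagementClient
    async with DefaultAzureCredential() as credential:
        async with EventHubManagementClient(credential, "<subscription-id>") as client:
            async for group in client.schema_registry.list_by_namespace(
                    "my-resource-group", "my-namespace"):
                print(group.name)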
|
|
'''
Created on Mar 7, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
import inspect, os
from arelle import XmlUtil, XbrlConst, XPathParser, Locale, XPathContext
from arelle.ModelDtsObject import ModelResource
from arelle.ModelInstanceObject import ModelDimensionValue
from arelle.ModelValue import qname, QName
from arelle.ModelObject import ModelObject
from arelle.ModelFormulaObject import (Trace, ModelFormulaResource, ModelFormulaRules, ModelConceptName,
ModelParameter, Aspect, aspectStr, aspectRuleAspects)
from arelle.ModelInstanceObject import ModelFact
from arelle.FormulaEvaluator import (filterFacts as formulaEvaluatorFilterFacts,
aspectsMatch, factsPartitions, VariableBinding)
from arelle.PrototypeInstanceObject import FactPrototype
ROLLUP_NOT_ANALYZED = 0
CHILD_ROLLUP_FIRST = 1
CHILD_ROLLUP_LAST = 2
CHILDREN_BUT_NO_ROLLUP = 3
OPEN_ASPECT_ENTRY_SURROGATE = '\uDBFF'
EMPTY_SET = set()
def definitionNodes(nodes):
return [(node.definitionNode if isinstance(node, StructuralNode) else node) for node in nodes]
# table linkbase structural nodes for rendering
class StructuralNode:
def __init__(self, parentStructuralNode, breakdownNode, definitionNode, zInheritance=None, contextItemFact=None, tableNode=None, rendrCntx=None):
self.parentStructuralNode = parentStructuralNode
self._rendrCntx = rendrCntx or parentStructuralNode._rendrCntx # copy from parent except at root
self.definitionNode = definitionNode
self.variables = {}
self.aspects = {}
self.childStructuralNodes = []
self.rollUpStructuralNode = None
self.zInheritance = zInheritance
if contextItemFact is not None:
self.contextItemBinding = VariableBinding(self._rendrCntx,
boundFact=contextItemFact)
if isinstance(self.contextItemBinding.yieldedFact, FactPrototype):
for aspect in definitionNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
self.aspectEntryObjectId = self.aspects[aspect] = contextItemFact.aspectEntryObjectId
break
else:
self.contextItemBinding = None
self.subtreeRollUp = ROLLUP_NOT_ANALYZED
self.depth = parentStructuralNode.depth + 1 if parentStructuralNode else 0
if tableNode is not None:
self.tableNode = tableNode
self.breakdownNode = breakdownNode # CR definition node
self.tagSelector = definitionNode.tagSelector
self.isLabeled = True
@property
def modelXbrl(self):
return self.definitionNode.modelXbrl
@property
def choiceStructuralNodes(self):
if hasattr(self, "_choiceStructuralNodes"):
return self._choiceStructuralNodes
if self.parentStructuralNode is not None:
return self.parentStructuralNode.choiceStructuralNodes
# choiceStrNodes are on the breakdown node (if any)
return None
@property
def isAbstract(self):
if self.subtreeRollUp:
return self.subtreeRollUp == CHILDREN_BUT_NO_ROLLUP
try:
try:
return self.abstract # ordinate may have an abstract attribute
except AttributeError: # if none use axis object
return self.definitionNode.isAbstract
except AttributeError: # axis may never be abstract
return False
def isSummary(self):
if self.childStructuralNodes is not None and not self.isAbstract:
return True
else:
return False
@property
def isRollUp(self):
return self.definitionNode.isRollUp
@property
def cardinalityAndDepth(self):
return self.definitionNode.cardinalityAndDepth(self)
@property
def structuralDepth(self):
if self.parentStructuralNode is not None:
return self.parentStructuralNode.structuralDepth + 1
return 0
'''
def breakdownNode(self, tableELR):
definitionNode = self.definitionNode
if isinstance(definitionNode, ModelBreakdown):
return definitionNode
axisSubtreeRelSet = definitionNode.modelXbrl.relationshipSet((XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305, XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011), tableELR)
while (True):
for parentRel in axisSubtreeRelSet.toModelObject(definitionNode):
definitionNode = parentRel.fromModelObject
if isinstance(definitionNode, ModelBreakdown):
return definitionNode
break # recurse to move to this node's parent breakdown node
return definitionNode # give up here
'''
def constraintSet(self, tagSelectors=None):
definitionNode = self.definitionNode
if tagSelectors:
for tag in tagSelectors:
if tag in definitionNode.constraintSets:
return definitionNode.constraintSets[tag]
return definitionNode.constraintSets.get(None) # returns None if no default constraint set
def aspectsCovered(self, inherit=False):
aspectsCovered = _DICT_SET(self.aspects.keys()) | self.definitionNode.aspectsCovered()
if inherit and self.parentStructuralNode is not None:
aspectsCovered.update(self.parentStructuralNode.aspectsCovered(inherit=inherit))
return aspectsCovered
def hasAspect(self, aspect, inherit=True):
return (aspect in self.aspects or
self.definitionNode.hasAspect(self, aspect) or
(inherit and
self.parentStructuralNode is not None and
self.parentStructuralNode.hasAspect(aspect, inherit)))
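    # Aspect value resolution order: a value stored directly on this structural node,
    # then the definition node's constraint set (honoring any tag selectors), then,
    # when inherit is true, the parent structural node. Dimension aspects accumulate
    # into a set and are reduced by any omit-dimension rule on the constraint set.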
def aspectValue(self, aspect, inherit=True, dims=None, depth=0, tagSelectors=None):
xc = self._rendrCntx
if False: # TEST: self.choiceStructuralNodes: # use aspects from choice structural node
choiceNodeIndex = getattr(self,"choiceNodeIndex",0)
if choiceNodeIndex != -1:
chosenStructuralNode = self.choiceStructuralNodes[choiceNodeIndex]
aspects = chosenStructuralNode.aspects
definitionNode = chosenStructuralNode.definitionNode
contextItemBinding = chosenStructuralNode.contextItemBinding
else: # aspect entry mode
aspects = self.aspects
definitionNode = self.choiceStructuralNodes[0].definitionNode
contextItemBinding = None
else:
aspects = self.aspects
definitionNode = self.definitionNode
contextItemBinding = self.contextItemBinding
constraintSet = self.constraintSet(tagSelectors)
if aspect == Aspect.DIMENSIONS:
if dims is None: dims = set()
if inherit and self.parentStructuralNode is not None:
dims |= self.parentStructuralNode.aspectValue(aspect, dims=dims, depth=depth+1)
if aspect in aspects:
dims |= aspects[aspect]
elif constraintSet is not None and constraintSet.hasAspect(self, aspect):
dims |= set(definitionNode.aspectValue(xc, aspect) or {})
if constraintSet is not None and constraintSet.hasAspect(self, Aspect.OMIT_DIMENSIONS):
dims -= set(constraintSet.aspectValue(xc, Aspect.OMIT_DIMENSIONS))
return dims
if aspect in aspects:
return aspects[aspect]
elif constraintSet is not None and constraintSet.hasAspect(self, aspect):
if isinstance(definitionNode, ModelSelectionDefinitionNode):
# result is in the indicated variable of ordCntx
return self.variables.get(self.definitionNode.variableQname)
elif isinstance(definitionNode, ModelFilterDefinitionNode):
if contextItemBinding:
return contextItemBinding.aspectValue(aspect)
elif isinstance(definitionNode, ModelTupleDefinitionNode):
if aspect == Aspect.LOCATION and contextItemBinding:
return contextItemBinding.yieldedFact
# non-location tuple aspects don't leak into cell bindings
else:
return constraintSet.aspectValue(xc, aspect)
if inherit and self.parentStructuralNode is not None:
return self.parentStructuralNode.aspectValue(aspect, depth=depth+1)
return None
'''
@property
    def primaryItemQname(self): # for compatibility with viewRelationships
if Aspect.CONCEPT in self.aspects:
return self.aspects[Aspect.CONCEPT]
return self.definitionNode.primaryItemQname
@property
def explicitDims(self):
return self.definitionNode.explicitDims
'''
def objectId(self, refId=""):
return self.definitionNode.objectId(refId)
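    # Header resolution order: a selection/axis message (evaluated, or returned as a
    # format string), then the definition node's generated label, then the open-aspect
    # entry surrogate for unbound open nodes, then the rolling-up child's header, then
    # the standard label of a covered concept or explicit dimension member; finally,
    # when a role is requested, the parent structural node is consulted.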
def header(self, role=None, lang=None, evaluate=True, returnGenLabel=True, returnMsgFormatString=False, recurseParent=True, returnStdLabel=True):
# if ord is a nested selectionAxis selection, use selection-message or text contents instead of axis headers
isZSelection = isinstance(self.definitionNode, ModelSelectionDefinitionNode) and hasattr(self, "zSelection")
if role is None:
# check for message before checking for genLabel
msgsRelationshipSet = self.definitionNode.modelXbrl.relationshipSet(
(XbrlConst.tableDefinitionNodeSelectionMessage201301, XbrlConst.tableAxisSelectionMessage2011)
if isZSelection else
(XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011))
if msgsRelationshipSet:
msg = msgsRelationshipSet.label(self.definitionNode, XbrlConst.standardMessage, lang, returnText=False)
if msg is not None:
if evaluate:
if returnMsgFormatString:
return msg.formatString # not possible to evaluate (during resolution)
else:
return self.evaluate(msg, msg.evaluate)
else:
return XmlUtil.text(msg)
if isZSelection: # no message, return text of selection
return self.variables.get(self.definitionNode.variableQname, "selection")
if returnGenLabel:
label = self.definitionNode.genLabel(role=role, lang=lang)
if label:
return label
if self.isEntryAspect and role is None:
            # True if open node bound to a prototype, false if bound to a real fact
return OPEN_ASPECT_ENTRY_SURROGATE # sort pretty high, work ok for python 2.7/3.2 as well as 3.3
# if there's a child roll up, check for it
if self.rollUpStructuralNode is not None: # check the rolling-up child too
return self.rollUpStructuralNode.header(role, lang, evaluate, returnGenLabel, returnMsgFormatString, recurseParent)
        # if aspect is a concept or dimension, return its standard label
concept = None
if role is None and returnStdLabel:
for aspect in self.aspectsCovered():
aspectValue = self.aspectValue(aspect, inherit=recurseParent)
if isinstance(aspect, QName) or aspect == Aspect.CONCEPT: # dimension or concept
if isinstance(aspectValue, QName):
concept = self.modelXbrl.qnameConcepts[aspectValue]
break
elif isinstance(aspectValue, ModelDimensionValue):
if aspectValue.isExplicit:
concept = aspectValue.member
elif aspectValue.isTyped:
return XmlUtil.innerTextList(aspectValue.typedMember)
elif isinstance(aspectValue, ModelObject):
text = XmlUtil.innerTextList(aspectValue)
if not text and XmlUtil.hasChild(aspectValue, aspectValue.namespaceURI, "forever"):
text = "forever"
return text
if concept is not None:
label = concept.label(lang=lang)
if label:
return label
# if there is a role, check if it's available on a parent node
if role and recurseParent and self.parentStructuralNode is not None:
return self.parentStructuralNode.header(role, lang, evaluate, returnGenLabel, returnMsgFormatString, recurseParent)
return None
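    # Evaluate an XPath-derived expression in this node's context: temporarily bind the
    # yielded fact as the XPath context item, push this node's (or the chosen node's)
    # variables into scope, delegate up the parent / other-axis / z-inheritance chain,
    # and restore the previous state afterwards; XPath errors are logged and yield ''.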
def evaluate(self, evalObject, evalMethod, otherAxisStructuralNode=None, evalArgs=(), handleXPathException=True, **kwargs):
xc = self._rendrCntx
if self.contextItemBinding and not isinstance(xc.contextItem, ModelFact):
previousContextItem = xc.contextItem # xbrli.xbrl
xc.contextItem = self.contextItemBinding.yieldedFact
else:
previousContextItem = None
if self.choiceStructuralNodes and hasattr(self,"choiceNodeIndex"):
variables = self.choiceStructuralNodes[self.choiceNodeIndex].variables
else:
variables = self.variables
removeVarQnames = []
        for qn, value in variables.items():
            if qn not in xc.inScopeVars:
                removeVarQnames.append(qn)
                xc.inScopeVars[qn] = value
if self.parentStructuralNode is not None:
result = self.parentStructuralNode.evaluate(evalObject, evalMethod, otherAxisStructuralNode, evalArgs)
elif otherAxisStructuralNode is not None:
# recurse to other ordinate (which will recurse to z axis)
result = otherAxisStructuralNode.evaluate(evalObject, evalMethod, None, evalArgs)
elif self.zInheritance is not None:
result = self.zInheritance.evaluate(evalObject, evalMethod, None, evalArgs)
else:
try:
result = evalMethod(xc, *evalArgs)
except XPathContext.XPathException as err:
if not handleXPathException:
raise
xc.modelXbrl.error(err.code,
_("%(element)s set %(xlinkLabel)s \nException: %(error)s"),
modelObject=evalObject, element=evalObject.localName,
xlinkLabel=evalObject.xlinkLabel, error=err.message)
result = ''
for qn in removeVarQnames:
xc.inScopeVars.pop(qn)
if previousContextItem is not None:
xc.contextItem = previousContextItem # xbrli.xbrl
return result
def hasValueExpression(self, otherAxisStructuralNode=None):
return (self.definitionNode.hasValueExpression or
(otherAxisStructuralNode is not None and otherAxisStructuralNode.definitionNode.hasValueExpression))
def evalValueExpression(self, fact, otherAxisStructuralNode=None):
for structuralNode in (self, otherAxisStructuralNode):
if structuralNode is not None and structuralNode.definitionNode.hasValueExpression:
return self.evaluate(self.definitionNode, structuralNode.definitionNode.evalValueExpression, otherAxisStructuralNode=otherAxisStructuralNode, evalArgs=(fact,))
return None
@property
def isEntryAspect(self):
# true if open node and bound to a fact prototype
return self.contextItemBinding is not None and isinstance(self.contextItemBinding.yieldedFact, FactPrototype)
def isEntryPrototype(self, default=False):
# true if all axis open nodes before this one are entry prototypes (or not open axes)
if self.contextItemBinding is not None:
# True if open node bound to a prototype, false if bound to a real fact
return isinstance(self.contextItemBinding.yieldedFact, FactPrototype)
if self.parentStructuralNode is not None:
return self.parentStructuralNode.isEntryPrototype(default)
return default # nothing open to be bound to a fact
@property
def tableDefinitionNode(self):
if self.parentStructuralNode is None:
return self.tableNode
else:
return self.parentStructuralNode.tableDefinitionNode
@property
def tagSelectors(self):
try:
return self._tagSelectors
except AttributeError:
if self.parentStructuralNode is None:
self._tagSelectors = set()
else:
self._tagSelectors = self.parentStructuralNode.tagSelectors
if self.tagSelector:
self._tagSelectors.add(self.tagSelector)
return self._tagSelectors
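    # Count of leaf structural nodes beneath this node: a childless node counts as one,
    # and a non-abstract closed node with children contributes an extra leaf for its roll-up.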
@property
def leafNodeCount(self):
childLeafCount = 0
if self.childStructuralNodes:
for childStructuralNode in self.childStructuralNodes:
childLeafCount += childStructuralNode.leafNodeCount
if childLeafCount == 0:
return 1
if not self.isAbstract and isinstance(self.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)):
childLeafCount += 1 # has a roll up
return childLeafCount
def setHasOpenNode(self):
if self.parentStructuralNode is not None:
self.parentStructuralNode.setHasOpenNode()
else:
self.hasOpenNode = True
def inheritedPrimaryItemQname(self, view):
        return (self.primaryItemQname or
                (self.parentStructuralNode.inheritedPrimaryItemQname(view)
                 if self.parentStructuralNode is not None else None))
def inheritedExplicitDims(self, view, dims=None, nested=False):
if dims is None: dims = {}
        if self.parentStructuralNode is not None:
self.parentStructuralNode.inheritedExplicitDims(view, dims, True)
for dim, mem in self.explicitDims:
dims[dim] = mem
if not nested:
return {(dim,mem) for dim,mem in dims.items() if mem != 'omit'}
def inheritedAspectValue(self, otherAxisStructuralNode,
view, aspect, tagSelectors,
xAspectStructuralNodes, yAspectStructuralNodes, zAspectStructuralNodes):
aspectStructuralNodes = xAspectStructuralNodes.get(aspect, EMPTY_SET) | yAspectStructuralNodes.get(aspect, EMPTY_SET) | zAspectStructuralNodes.get(aspect, EMPTY_SET)
structuralNode = None
if len(aspectStructuralNodes) == 1:
structuralNode = aspectStructuralNodes.pop()
elif len(aspectStructuralNodes) > 1:
if aspect == Aspect.LOCATION:
hasClash = False
for _aspectStructuralNode in aspectStructuralNodes:
if not _aspectStructuralNode.definitionNode.aspectValueDependsOnVars(aspect):
if structuralNode:
hasClash = True
else:
structuralNode = _aspectStructuralNode
else:
# take closest structural node
hasClash = True
''' reported in static analysis by RenderingEvaluator.py
if hasClash:
from arelle.ModelFormulaObject import aspectStr
view.modelXbrl.error("xbrlte:aspectClashBetweenBreakdowns",
_("Aspect %(aspect)s covered by multiple axes."),
modelObject=view.modelTable, aspect=aspectStr(aspect))
'''
if structuralNode:
definitionNodeConstraintSet = structuralNode.constraintSet(tagSelectors)
if definitionNodeConstraintSet is not None and definitionNodeConstraintSet.aspectValueDependsOnVars(aspect):
return self.evaluate(definitionNodeConstraintSet,
definitionNodeConstraintSet.aspectValue, # this passes a method
otherAxisStructuralNode=otherAxisStructuralNode,
evalArgs=(aspect,))
return structuralNode.aspectValue(aspect, tagSelectors=tagSelectors)
return None
def __repr__(self):
return ("structuralNode[{0}]{1})".format(self.objectId(),self.definitionNode))
# Root class for rendering is formula, to allow linked and nested compiled expressions
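# Helper building the sorted tuple of (caption, text) pairs shown in property views for a
# definition object's generated labels and references, plus its xlink:label.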
def definitionModelLabelsView(mdlObj):
return tuple(sorted([("{} {} {} {}".format(label.localName,
str(rel.order).rstrip("0").rstrip("."),
os.path.basename(label.role or ""),
label.xmlLang),
label.stringValue)
for rel in mdlObj.modelXbrl.relationshipSet((XbrlConst.elementLabel,XbrlConst.elementReference)).fromModelObject(mdlObj)
for label in (rel.toModelObject,)] +
[("xlink:label", mdlObj.xlinkLabel)]))
# 2010 EU Table linkbase
class ModelEuTable(ModelResource):
def init(self, modelDocument):
super(ModelEuTable, self).init(modelDocument)
self.aspectsInTaggedConstraintSets = set()
@property
def aspectModel(self):
return "dimensional"
@property
def propertyView(self):
return ((("id", self.id),) +
self.definitionLabelsView)
''' now only accessed from structural node
def header(self, role=None, lang=None, strip=False, evaluate=True):
return self.genLabel(role=role, lang=lang, strip=strip)
'''
@property
def parameters(self):
return {}
@property
def definitionLabelsView(self):
return definitionModelLabelsView(self)
def filteredFacts(self, xpCtx, facts):
return facts
@property
def xpathContext(self):
return None
def __repr__(self):
return ("table[{0}]{1})".format(self.objectId(),self.propertyView))
class ModelEuAxisCoord(ModelResource):
def init(self, modelDocument):
super(ModelEuAxisCoord, self).init(modelDocument)
@property
def abstract(self):
return self.get("abstract") or 'false'
@property
def isAbstract(self):
return self.abstract == "true"
@property
def isMerged(self):
return False
@property
def parentChildOrder(self):
return self.get("parentChildOrder")
@property
def isRollUp(self):
return False
@property
def parentDefinitionNode(self):
try:
return self._parentDefinitionNode
except AttributeError:
parentDefinitionNode = None
for rel in self.modelXbrl.relationshipSet(XbrlConst.euAxisMember).toModelObject(self):
parentDefinitionNode = rel.fromModelObject
break
self._parentDefinitionNode = parentDefinitionNode
return parentDefinitionNode
def aspectsCovered(self):
aspectsCovered = set()
if XmlUtil.hasChild(self, XbrlConst.euRend, "primaryItem"):
aspectsCovered.add(Aspect.CONCEPT)
if XmlUtil.hasChild(self, XbrlConst.euRend, "timeReference"):
aspectsCovered.add(Aspect.INSTANT)
for e in XmlUtil.children(self, XbrlConst.euRend, "explicitDimCoord"):
aspectsCovered.add(self.prefixedNameQname(e.get("dimension")))
return aspectsCovered
@property
def constraintSets(self):
return {None: self}
@property
def tagSelector(self): # default constraint set for ruleNode has name None
return None
def hasAspect(self, structuralNode, aspect):
if aspect == Aspect.CONCEPT:
return XmlUtil.hasChild(self, XbrlConst.euRend, "primaryItem")
elif aspect == Aspect.DIMENSIONS:
return XmlUtil.hasChild(self, XbrlConst.euRend, "explicitDimCoord")
elif aspect in (Aspect.PERIOD_TYPE, Aspect.INSTANT):
return XmlUtil.hasChild(self, XbrlConst.euRend, "timeReference")
elif isinstance(aspect, QName):
for e in XmlUtil.children(self, XbrlConst.euRend, "explicitDimCoord"):
if self.prefixedNameQname(e.get("dimension")) == aspect:
return True
return False
def aspectValueDependsOnVars(self, aspect):
return False
def aspectValue(self, xpCtx, aspect, inherit=False):
if aspect == Aspect.DIMENSIONS:
dims = set(self.prefixedNameQname(e.get("dimension"))
for e in XmlUtil.children(self, XbrlConst.euRend, "explicitDimCoord"))
if inherit and self.parentDefinitionNode is not None:
dims |= self.parentDefinitionNode.aspectValue(None, aspect, inherit)
return dims
if inherit and not self.hasAspect(None, aspect):
if self.parentDefinitionNode is not None:
return self.parentDefinitionNode.aspectValue(None, aspect, inherit)
return None
if aspect == Aspect.CONCEPT:
priItem = XmlUtil.childAttr(self, XbrlConst.euRend, "primaryItem", "name")
if priItem is not None:
return self.prefixedNameQname(priItem)
return None
elif aspect == Aspect.PERIOD_TYPE:
if XmlUtil.hasChild(self, XbrlConst.euRend, "timeReference"):
return "instant"
elif aspect == Aspect.INSTANT:
return XmlUtil.datetimeValue(XmlUtil.childAttr(self, XbrlConst.euRend, "timeReference", "instant"),
addOneDay=True)
elif isinstance(aspect, QName):
for e in XmlUtil.children(self, XbrlConst.euRend, "explicitDimCoord"):
if self.prefixedNameQname(e.get("dimension")) == aspect:
return self.prefixedNameQname(e.get("value"))
return None
'''
@property
def primaryItemQname(self):
priItem = XmlUtil.childAttr(self, XbrlConst.euRend, "primaryItem", "name")
if priItem is not None:
return self.prefixedNameQname(priItem)
return None
@property
def explicitDims(self):
return {(self.prefixedNameQname(e.get("dimension")),
self.prefixedNameQname(e.get("value")))
for e in XmlUtil.children(self, XbrlConst.euRend, "explicitDimCoord")}
@property
def instant(self):
return XmlUtil.datetimeValue(XmlUtil.childAttr(self, XbrlConst.euRend, "timeReference", "instant"),
addOneDay=True)
'''
def cardinalityAndDepth(self, structuralNode, **kwargs):
return (1, 1)
''' now only accessed from structural node
def header(self, role=None, lang=None, strip=False, evaluate=True):
return self.genLabel(role=role, lang=lang, strip=strip)
'''
@property
def hasValueExpression(self):
return False
@property
def definitionLabelsView(self):
return definitionModelLabelsView(self)
@property
def propertyView(self):
explicitDims = self.aspectValue(None, Aspect.DIMENSIONS, inherit=True)
return ((("id", self.id),
("primary item", self.aspectValue(None, Aspect.CONCEPT, inherit=True)),
("dimensions", "({0})".format(len(explicitDims)),
tuple((str(dim),str(self.aspectValue(None, dim, inherit=True)))
for dim in sorted(explicitDims)))
if explicitDims else (),
("abstract", self.abstract)) +
self.definitionLabelsView)
def __repr__(self):
return ("axisCoord[{0}]{1})".format(self.objectId(),self.propertyView))
# 2011 Table linkbase
class ModelTable(ModelFormulaResource):
def init(self, modelDocument):
super(ModelTable, self).init(modelDocument)
self.modelXbrl.modelRenderingTables.add(self)
self.modelXbrl.hasRenderingTables = True
self.aspectsInTaggedConstraintSets = set()
def clear(self):
if getattr(self, "_rendrCntx"):
self._rendrCntx.close()
super(ModelTable, self).clear() # delete children
@property
def aspectModel(self):
return self.get("aspectModel", "dimensional") # attribute removed 2013-06, always dimensional
@property
def descendantArcroles(self):
return (XbrlConst.tableFilter, XbrlConst.tableFilterMMDD, XbrlConst.tableFilter201305, XbrlConst.tableFilter201301, XbrlConst.tableFilter2011,
XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011,
XbrlConst.tableParameter, XbrlConst.tableParameterMMDD)
@property
def filterRelationships(self):
try:
return self._filterRelationships
except AttributeError:
rels = [] # order so conceptName filter is first (if any) (may want more sorting in future)
for rel in self.modelXbrl.relationshipSet((XbrlConst.tableFilter, XbrlConst.tableFilterMMDD, XbrlConst.tableFilter201305, XbrlConst.tableFilter201301, XbrlConst.tableFilter2011)).fromModelObject(self):
if isinstance(rel.toModelObject, ModelConceptName):
rels.insert(0, rel) # put conceptName filters first
else:
rels.append(rel)
self._filterRelationships = rels
return rels
''' now only accessed from structural node
def header(self, role=None, lang=None, strip=False, evaluate=True):
return self.genLabel(role=role, lang=lang, strip=strip)
'''
@property
def definitionLabelsView(self):
return definitionModelLabelsView(self)
def filteredFacts(self, xpCtx, facts):
return formulaEvaluatorFilterFacts(xpCtx, VariableBinding(xpCtx),
facts, self.filterRelationships, None)
@property
def renderingXPathContext(self):
try:
return self._rendrCntx
except AttributeError:
xpCtx = getattr(self.modelXbrl, "rendrCntx", None) # none for EU 2010 tables
if xpCtx is not None:
self._rendrCntx = xpCtx.copy()
for tblParamRel in self.modelXbrl.relationshipSet((XbrlConst.tableParameter, XbrlConst.tableParameterMMDD)).fromModelObject(self):
varQname = tblParamRel.variableQname
parameter = tblParamRel.toModelObject
if isinstance(parameter, ModelParameter):
self._rendrCntx.inScopeVars[varQname] = xpCtx.inScopeVars.get(parameter.parameterQname)
else:
self._rendrCntx = None
return self._rendrCntx
@property
def propertyView(self):
return ((("id", self.id),) +
self.definitionLabelsView)
def __repr__(self):
return ("modlTable[{0}]{1})".format(self.objectId(),self.propertyView))
class ModelDefinitionNode(ModelFormulaResource):
def init(self, modelDocument):
super(ModelDefinitionNode, self).init(modelDocument)
@property
def parentDefinitionNode(self):
return None
@property
def descendantArcroles(self):
return (XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011,
XbrlConst.tableDefinitionNodeSubtree201305,
XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD)
def hasAspect(self, structuralNode, aspect):
return False
def aspectValueDependsOnVars(self, aspect):
return False
@property
def variablename(self):
"""(str) -- name attribute"""
return self.getStripped("name")
@property
def variableQname(self):
"""(QName) -- resolved name for an XPath bound result having a QName name attribute"""
varName = self.variablename
return qname(self, varName, noPrefixIsNoNamespace=True) if varName else None
def aspectValue(self, xpCtx, aspect, inherit=True):
if aspect == Aspect.DIMENSIONS:
return []
return None
def aspectsCovered(self):
return set()
@property
def constraintSets(self):
return {None: self}
@property
def tagSelector(self):
return self.get("tagSelector")
@property
def valueExpression(self):
return self.get("value")
@property
def hasValueExpression(self):
return bool(self.valueProg) # non empty program
def compile(self):
if not hasattr(self, "valueProg"):
value = self.valueExpression
self.valueProg = XPathParser.parse(self, value, self, "value", Trace.VARIABLE)
# duplicates formula resource for RuleAxis but not for other subclasses
super(ModelDefinitionNode, self).compile()
def evalValueExpression(self, xpCtx, fact):
# compiled by FormulaResource compile()
return xpCtx.evaluateAtomicValue(self.valueProg, 'xs:string', fact)
'''
@property
    def primaryItemQname(self): # for compatibility with viewRelationships
return None
@property
def explicitDims(self):
return set()
'''
@property
def isAbstract(self):
return False
@property
def isMerged(self):
return False
@property
def isRollUp(self):
return self.get("rollUp") == 'true'
def cardinalityAndDepth(self, structuralNode, **kwargs):
return (1,
1 if (structuralNode.header(evaluate=False) is not None) else 0)
    ''' now only accessed from structural node (must have table context for evaluate)
def header(self, role=None, lang=None, strip=False, evaluate=True):
if role is None:
# check for message before checking for genLabel
msgsRelationshipSet = self.modelXbrl.relationshipSet((XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011))
if msgsRelationshipSet:
msg = msgsRelationshipSet.label(self, XbrlConst.standardMessage, lang, returnText=False)
if msg is not None:
if evaluate:
result = msg.evaluate(self.modelXbrl.rendrCntx)
else:
result = XmlUtil.text(msg)
if strip:
return result.strip()
return result
return self.genLabel(role=role, lang=lang, strip=strip)
'''
@property
def definitionNodeView(self):
return XmlUtil.xmlstring(self, stripXmlns=True, prettyPrint=True)
@property
def definitionLabelsView(self):
return definitionModelLabelsView(self)
class ModelBreakdown(ModelDefinitionNode):
def init(self, modelDocument):
super(ModelBreakdown, self).init(modelDocument)
@property
def parentChildOrder(self):
return self.get("parentChildOrder")
@property
def descendantArcroles(self):
return (XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305)
@property
def propertyView(self):
return ((("id", self.id),
("parent child order", self.parentChildOrder),
("definition", self.definitionNodeView)) +
self.definitionLabelsView)
class ModelClosedDefinitionNode(ModelDefinitionNode):
def init(self, modelDocument):
super(ModelClosedDefinitionNode, self).init(modelDocument)
@property
def abstract(self):
return self.get("abstract")
@property
def isAbstract(self):
return self.abstract == 'true'
@property
def parentChildOrder(self):
return self.get("parentChildOrder")
@property
def descendantArcroles(self):
return (XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011, XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011)
def filteredFacts(self, xpCtx, facts):
aspects = self.aspectsCovered()
axisAspectValues = dict((aspect, self.aspectValue(xpCtx, aspect))
for aspect in aspects)
fp = FactPrototype(self, axisAspectValues)
return set(fact
for fact in facts
if aspectsMatch(xpCtx, fact, fp, aspects))
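# A constraint set carries formula-style aspect rules; rule nodes expose one constraint
# set per tag selector (the None key holds the default set) when resolving aspect values.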
class ModelConstraintSet(ModelFormulaRules):
def init(self, modelDocument):
super(ModelConstraintSet, self).init(modelDocument)
self._locationSourceVar = self.source(Aspect.LOCATION_RULE, acceptFormulaSource=False)
self._locationAspectCovered = set()
self.aspectValues = {} # only needed if error blocks compiling this node, replaced by compile()
self.aspectProgs = {} # ditto
if self._locationSourceVar: self._locationAspectCovered.add(Aspect.LOCATION) # location is parent (tuple), not sibling
def hasAspect(self, structuralNode, aspect, inherit=None):
return self._hasAspect(structuralNode, aspect, inherit)
def _hasAspect(self, structuralNode, aspect, inherit=None): # opaque from ModelRuleDefinitionNode
if aspect == Aspect.LOCATION and self._locationSourceVar:
return True
elif aspect in aspectRuleAspects:
return any(self.hasRule(a) for a in aspectRuleAspects[aspect])
return self.hasRule(aspect)
def aspectValue(self, xpCtx, aspect, inherit=None):
try:
            # if xpCtx is None: xpCtx = self.modelXbrl.rendrCntx (must have xpCtx of calling table)
if aspect == Aspect.LOCATION and self._locationSourceVar in xpCtx.inScopeVars:
return xpCtx.inScopeVars[self._locationSourceVar]
return self.evaluateRule(xpCtx, aspect)
except AttributeError:
return '(unavailable)' # table defective or not initialized
def aspectValueDependsOnVars(self, aspect):
return aspect in _DICT_SET(self.aspectProgs.keys()) or aspect in self._locationAspectCovered
def aspectsCovered(self):
return _DICT_SET(self.aspectValues.keys()) | _DICT_SET(self.aspectProgs.keys()) | self._locationAspectCovered
# provide model table's aspect model to compile() method of ModelFormulaRules
@property
def aspectModel(self):
for frameRecord in inspect.stack():
obj = frameRecord[0].f_locals['self']
if isinstance(obj,ModelTable):
return obj.aspectModel
return None
'''
@property
def primaryItemQname(self):
return self.evaluateRule(self.modelXbrl.rendrCntx, Aspect.CONCEPT)
@property
def explicitDims(self):
dimMemSet = set()
dims = self.evaluateRule(self.modelXbrl.rendrCntx, Aspect.DIMENSIONS)
if dims: # may be none if no dim aspects on this ruleAxis
for dim in dims:
mem = self.evaluateRule(self.modelXbrl.rendrCntx, dim)
if mem: # may be none if dimension was omitted
dimMemSet.add( (dim, mem) )
return dimMemSet
@property
def instant(self):
periodType = self.evaluateRule(self.modelXbrl.rendrCntx, Aspect.PERIOD_TYPE)
if periodType == "forever":
return None
return self.evaluateRule(self.modelXbrl.rendrCntx,
{"instant": Aspect.INSTANT,
"duration": Aspect.END}[periodType])
'''
def cardinalityAndDepth(self, structuralNode, **kwargs):
if self.aspectValues or self.aspectProgs or structuralNode.header(evaluate=False) is not None:
return (1, 1)
else:
return (0, 0)
class ModelRuleSet(ModelConstraintSet, ModelFormulaResource):
def init(self, modelDocument):
super(ModelRuleSet, self).init(modelDocument)
@property
def tagName(self): # can't call it tag because that would hide ElementBase.tag
return self.get("tag")
class ModelRuleDefinitionNode(ModelConstraintSet, ModelClosedDefinitionNode):
def init(self, modelDocument):
super(ModelRuleDefinitionNode, self).init(modelDocument)
@property
def merge(self):
return self.get("merge")
@property
def isMerged(self):
return self.merge == "true"
@property
def constraintSets(self):
try:
return self._constraintSets
except AttributeError:
self._constraintSets = dict((ruleSet.tagName, ruleSet)
for ruleSet in XmlUtil.children(self, self.namespaceURI, "ruleSet"))
if self.aspectsCovered(): # any local rule?
self._constraintSets[None] = self
return self._constraintSets
def hasAspect(self, structuralNode, aspect):
return any(constraintSet._hasAspect(structuralNode, aspect)
for constraintSet in self.constraintSets.values())
@property
def aspectsInTaggedConstraintSet(self):
try:
return self._aspectsInTaggedConstraintSet
except AttributeError:
self._aspectsInTaggedConstraintSet = set()
            for tag, constraintSet in self.constraintSets.items():
if tag is not None:
for aspect in constraintSet.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
self._aspectsInTaggedConstraintSet.add(aspect)
return self._aspectsInTaggedConstraintSet
def compile(self):
super(ModelRuleDefinitionNode, self).compile()
for constraintSet in self.constraintSets.values():
if constraintSet != self: # compile nested constraint sets
constraintSet.compile()
@property
def propertyView(self):
return ((("id", self.id),
("abstract", self.abstract),
("merge", self.merge),
("definition", self.definitionNodeView)) +
self.definitionLabelsView)
def __repr__(self):
return ("modelRuleDefinitionNode[{0}]{1})".format(self.objectId(),self.propertyView))
# deprecated 2013-05-17
class ModelTupleDefinitionNode(ModelRuleDefinitionNode):
def init(self, modelDocument):
super(ModelTupleDefinitionNode, self).init(modelDocument)
@property
def descendantArcroles(self):
return (XbrlConst.tableTupleContent201301, XbrlConst.tableTupleContent2011, XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011)
@property
def contentRelationships(self):
return self.modelXbrl.relationshipSet((XbrlConst.tableTupleContent201301, XbrlConst.tableTupleContent2011)).fromModelObject(self)
def hasAspect(self, structuralNode, aspect, inherit=None):
return aspect == Aspect.LOCATION # non-location aspects aren't leaked to ordinate for Tuple or self.hasRule(aspect)
def aspectValue(self, xpCtx, aspect, inherit=None):
return self.evaluateRule(xpCtx, aspect)
def aspectsCovered(self):
return {Aspect.LOCATION} # tuple's aspects don't leak to ordinates
def tupleAspectsCovered(self):
return _DICT_SET(self.aspectValues.keys()) | _DICT_SET(self.aspectProgs.keys()) | {Aspect.LOCATION}
def filteredFacts(self, xpCtx, facts):
aspects = self.aspectsCovered()
axisAspectValues = dict((aspect, self.tupleAspectsCovered(aspect))
for aspect in aspects
if aspect != Aspect.LOCATION) # location determined by ordCntx, not axis
fp = FactPrototype(self, axisAspectValues)
return set(fact
for fact in facts
if fact.isTuple and aspectsMatch(xpCtx, fact, fp, aspects))
class ModelCompositionDefinitionNode(ModelClosedDefinitionNode):
def init(self, modelDocument):
super(ModelCompositionDefinitionNode, self).init(modelDocument)
@property
def abstract(self): # always abstract, no filters, no data
return 'true'
class ModelRelationshipDefinitionNode(ModelClosedDefinitionNode):
def init(self, modelDocument):
super(ModelRelationshipDefinitionNode, self).init(modelDocument)
def aspectsCovered(self):
return {Aspect.CONCEPT}
@property
def conceptQname(self):
name = self.getStripped("conceptname")
return qname(self, name, noPrefixIsNoNamespace=True) if name else None
@property
def relationshipSourceQname(self):
sourceQname = XmlUtil.child(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "relationshipSource")
if sourceQname is not None:
return qname( sourceQname, XmlUtil.text(sourceQname) )
return None
@property
def linkrole(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "linkrole")
@property
def axis(self):
a = XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), ("axis", "formulaAxis"))
if not a: a = 'descendant' # would be an XML error
return a
@property
def isOrSelfAxis(self):
return self.axis.endswith('-or-self')
@property
def generations(self):
try:
return _INT( XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "generations") )
except (TypeError, ValueError):
if self.axis in ('sibling', 'child', 'parent'):
return 1
return 0
@property
def relationshipSourceQnameExpression(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "relationshipSourceExpression")
@property
def linkroleExpression(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "linkroleExpression")
@property
def axisExpression(self):
        return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), ("axisExpression", "formulaAxisExpression"))
@property
def generationsExpression(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "generationsExpression")
def compile(self):
if not hasattr(self, "relationshipSourceQnameExpressionProg"):
self.relationshipSourceQnameExpressionProg = XPathParser.parse(self, self.relationshipSourceQnameExpression, self, "relationshipSourceQnameExpressionProg", Trace.VARIABLE)
self.linkroleExpressionProg = XPathParser.parse(self, self.linkroleExpression, self, "linkroleQnameExpressionProg", Trace.VARIABLE)
self.axisExpressionProg = XPathParser.parse(self, self.axisExpression, self, "axisExpressionProg", Trace.VARIABLE)
self.generationsExpressionProg = XPathParser.parse(self, self.generationsExpression, self, "generationsExpressionProg", Trace.VARIABLE)
super(ModelRelationshipDefinitionNode, self).compile()
def variableRefs(self, progs=[], varRefSet=None):
if self.relationshipSourceQname and self.relationshipSourceQname != XbrlConst.qnXfiRoot:
if varRefSet is None: varRefSet = set()
varRefSet.add(self.relationshipSourceQname)
return super(ModelRelationshipDefinitionNode, self).variableRefs(
[p for p in (self.relationshipSourceQnameExpressionProg,
self.linkroleExpressionProg, self.axisExpressionProg,
self.generationsExpressionProg)
if p], varRefSet)
def evalRrelationshipSourceQname(self, xpCtx, fact=None):
if self.relationshipSourceQname:
return self.relationshipSourceQname
return xpCtx.evaluateAtomicValue(self.relationshipSourceQnameExpressionProg, 'xs:QName', fact)
def evalLinkrole(self, xpCtx, fact=None):
if self.linkrole:
return self.linkrole
return xpCtx.evaluateAtomicValue(self.linkroleExpressionProg, 'xs:anyURI', fact)
def evalAxis(self, xpCtx, fact=None):
if self.axis:
return self.axis
return xpCtx.evaluateAtomicValue(self.axisExpressionProg, 'xs:token', fact)
def evalGenerations(self, xpCtx, fact=None):
if self.generations:
return self.generations
return xpCtx.evaluateAtomicValue(self.generationsExpressionProg, 'xs:integer', fact)
def cardinalityAndDepth(self, structuralNode, **kwargs):
return self.lenDepth(self.relationships(structuralNode, **kwargs),
self.axis.endswith('-or-self'))
def lenDepth(self, nestedRelationships, includeSelf):
l = 0
d = 1
for rel in nestedRelationships:
if isinstance(rel, list):
nl, nd = self.lenDepth(rel, False)
l += nl
nd += 1 # returns 0 if sublist is not nested
if nd > d:
d = nd
else:
l += 1
if includeSelf:
l += 1 # root relationships include root in addition
if includeSelf:
d += 1
return (l, d)
@property
def propertyView(self):
return ((("id", self.id),
("abstract", self.abstract),
("definition", self.definitionNodeView)) +
self.definitionLabelsView)
def __repr__(self):
return ("modelRelationshipDefinitionNode[{0}]{1})".format(self.objectId(),self.propertyView))
class ModelConceptRelationshipDefinitionNode(ModelRelationshipDefinitionNode):
def init(self, modelDocument):
super(ModelConceptRelationshipDefinitionNode, self).init(modelDocument)
def hasAspect(self, structuralNode, aspect):
return aspect == Aspect.CONCEPT
@property
def arcrole(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "arcrole")
@property
def arcQname(self):
arcnameElt = XmlUtil.child(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "arcname")
if arcnameElt is not None:
return qname( arcnameElt, XmlUtil.text(arcnameElt) )
return None
@property
def linkQname(self):
linknameElt = XmlUtil.child(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "linkname")
if linknameElt is not None:
return qname( linknameElt, XmlUtil.text(linknameElt) )
return None
def compile(self):
if not hasattr(self, "arcroleExpressionProg"):
self.arcroleExpressionProg = XPathParser.parse(self, self.arcroleExpression, self, "arcroleExpressionProg", Trace.VARIABLE)
self.linkQnameExpressionProg = XPathParser.parse(self, self.linkQnameExpression, self, "linkQnameExpressionProg", Trace.VARIABLE)
self.arcQnameExpressionProg = XPathParser.parse(self, self.arcQnameExpression, self, "arcQnameExpressionProg", Trace.VARIABLE)
super(ModelConceptRelationshipDefinitionNode, self).compile()
def variableRefs(self, progs=[], varRefSet=None):
return super(ModelConceptRelationshipDefinitionNode, self).variableRefs(
[p for p in (self.arcroleExpressionProg,
self.linkQnameExpressionProg, self.arcQnameExpressionProg)
if p], varRefSet)
def evalArcrole(self, xpCtx, fact=None):
if self.arcrole:
return self.arcrole
return xpCtx.evaluateAtomicValue(self.arcroleExpressionProg, 'xs:anyURI', fact)
def evalLinkQname(self, xpCtx, fact=None):
if self.linkQname:
return self.linkQname
return xpCtx.evaluateAtomicValue(self.linkQnameExpressionProg, 'xs:QName', fact)
def evalArcQname(self, xpCtx, fact=None):
if self.arcQname:
return self.arcQname
return xpCtx.evaluateAtomicValue(self.arcQnameExpressionProg, 'xs:QName', fact)
@property
def arcroleExpression(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "arcroleExpression")
@property
def linkQnameExpression(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "linknameExpression")
@property
def arcQnameExpression(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "arcnameExpression")
def coveredAspect(self, ordCntx=None):
return Aspect.CONCEPT
def relationships(self, structuralNode, **kwargs):
self._sourceQname = structuralNode.evaluate(self, self.evalRrelationshipSourceQname, **kwargs) or XbrlConst.qnXfiRoot
linkrole = structuralNode.evaluate(self, self.evalLinkrole)
if not linkrole:
linkrole = "XBRL-all-linkroles"
linkQname = (structuralNode.evaluate(self, self.evalLinkQname) or () )
arcrole = (structuralNode.evaluate(self, self.evalArcrole) or () )
arcQname = (structuralNode.evaluate(self, self.evalArcQname) or () )
self._axis = (structuralNode.evaluate(self, self.evalAxis) or () )
self._generations = (structuralNode.evaluate(self, self.evalGenerations) or () )
return concept_relationships(self.modelXbrl.rendrCntx,
None,
(self._sourceQname,
linkrole,
arcrole,
self._axis.replace('-or-self',''),
self._generations,
linkQname,
arcQname),
True) # return nested lists representing concept tree nesting
class ModelDimensionRelationshipDefinitionNode(ModelRelationshipDefinitionNode):
def init(self, modelDocument):
super(ModelDimensionRelationshipDefinitionNode, self).init(modelDocument)
def hasAspect(self, structuralNode, aspect):
return aspect == self.coveredAspect(structuralNode) or aspect == Aspect.DIMENSIONS
def aspectValue(self, xpCtx, aspect, inherit=None):
if aspect == Aspect.DIMENSIONS:
return (self.coveredAspect(xpCtx), )
return None
def aspectsCovered(self):
return {self.dimensionQname}
@property
def dimensionQname(self):
dimensionElt = XmlUtil.child(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "dimension")
if dimensionElt is not None:
return qname( dimensionElt, XmlUtil.text(dimensionElt) )
return None
@property
def dimensionQnameExpression(self):
return XmlUtil.childText(self, (XbrlConst.table, XbrlConst.tableMMDD, XbrlConst.table201305, XbrlConst.table201301, XbrlConst.table2011), "dimensionExpression")
def compile(self):
if not hasattr(self, "dimensionQnameExpressionProg"):
self.dimensionQnameExpressionProg = XPathParser.parse(self, self.dimensionQnameExpression, self, "dimensionQnameExpressionProg", Trace.VARIABLE)
super(ModelDimensionRelationshipDefinitionNode, self).compile()
def variableRefs(self, progs=[], varRefSet=None):
return super(ModelDimensionRelationshipDefinitionNode, self).variableRefs(self.dimensionQnameExpressionProg, varRefSet)
def evalDimensionQname(self, xpCtx, fact=None):
if self.dimensionQname:
return self.dimensionQname
return xpCtx.evaluateAtomicValue(self.dimensionQnameExpressionProg, 'xs:QName', fact)
def coveredAspect(self, structuralNode=None):
try:
return self._coveredAspect
except AttributeError:
self._coveredAspect = self.dimRelationships(structuralNode, getDimQname=True)
return self._coveredAspect
def relationships(self, structuralNode, **kwargs):
return self.dimRelationships(structuralNode, getMembers=True)
def dimRelationships(self, structuralNode, getMembers=False, getDimQname=False):
self._dimensionQname = structuralNode.evaluate(self, self.evalDimensionQname)
self._sourceQname = structuralNode.evaluate(self, self.evalRrelationshipSourceQname) or XbrlConst.qnXfiRoot
linkrole = structuralNode.evaluate(self, self.evalLinkrole)
if not linkrole and getMembers:
linkrole = "XBRL-all-linkroles"
dimConcept = self.modelXbrl.qnameConcepts.get(self._dimensionQname)
sourceConcept = self.modelXbrl.qnameConcepts.get(self._sourceQname)
self._axis = (structuralNode.evaluate(self, self.evalAxis) or () )
self._generations = (structuralNode.evaluate(self, self.evalGenerations) or () )
if ((self._dimensionQname and (dimConcept is None or not dimConcept.isDimensionItem)) or
(self._sourceQname and self._sourceQname != XbrlConst.qnXfiRoot and (
sourceConcept is None or not sourceConcept.isItem))):
return ()
if dimConcept is not None:
if getDimQname:
return self._dimensionQname
if sourceConcept is None:
sourceConcept = dimConcept
if getMembers:
return concept_relationships(self.modelXbrl.rendrCntx,
None,
(self._sourceQname,
linkrole,
"XBRL-dimensions", # all dimensions arcroles
self._axis.replace('-or-self',''),
self._generations),
True) # return nested lists representing concept tree nesting
if getDimQname:
if sourceConcept is not None:
# look back from member to a dimension
return self.stepDimRel(sourceConcept, linkrole)
return None
def stepDimRel(self, stepConcept, linkrole):
if stepConcept.isDimensionItem:
return stepConcept.qname
for rel in self.modelXbrl.relationshipSet("XBRL-dimensions").toModelObject(stepConcept):
if not linkrole or linkrole == rel.consecutiveLinkrole:
dim = self.stepDimRel(rel.fromModelObject, rel.linkrole)
if dim:
return dim
return None
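# Map the deprecated selection node's coveredAspect attribute tokens onto Aspect constants;
# any other token is treated as a dimension QName.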
coveredAspectToken = {"concept": Aspect.CONCEPT,
"entity-identifier": Aspect.VALUE,
"period-start": Aspect.START, "period-end": Aspect.END,
"period-instant": Aspect.INSTANT, "period-instant-end": Aspect.INSTANT_END,
"unit": Aspect.UNIT}
class ModelOpenDefinitionNode(ModelDefinitionNode):
def init(self, modelDocument):
super(ModelOpenDefinitionNode, self).init(modelDocument)
# deprecated 2013-05-17
class ModelSelectionDefinitionNode(ModelOpenDefinitionNode):
def init(self, modelDocument):
super(ModelSelectionDefinitionNode, self).init(modelDocument)
@property
def descendantArcroles(self):
return (XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011, XbrlConst.tableDefinitionNodeSelectionMessage201301, XbrlConst.tableAxisSelectionMessage2011)
def clear(self):
XPathParser.clearNamedProg(self, "selectProg")
super(ModelSelectionDefinitionNode, self).clear()
def coveredAspect(self, structuralNode=None):
try:
return self._coveredAspect
except AttributeError:
coveredAspect = self.get("coveredAspect")
if coveredAspect in coveredAspectToken:
self._coveredAspect = coveredAspectToken[coveredAspect]
else: # must be a qname
self._coveredAspect = qname(self, coveredAspect)
return self._coveredAspect
def aspectsCovered(self):
        return {self.coveredAspect()}
def hasAspect(self, structuralNode, aspect):
return aspect == self.coveredAspect() or (isinstance(self._coveredAspect,QName) and aspect == Aspect.DIMENSIONS)
@property
def select(self):
return self.get("select")
def compile(self):
if not hasattr(self, "selectProg"):
self.selectProg = XPathParser.parse(self, self.select, self, "select", Trace.PARAMETER)
super(ModelSelectionDefinitionNode, self).compile()
def variableRefs(self, progs=[], varRefSet=None):
return super(ModelSelectionDefinitionNode, self).variableRefs(self.selectProg, varRefSet)
def evaluate(self, xpCtx, typeQname=None):
if typeQname:
return xpCtx.evaluateAtomicValue(self.selectProg, typeQname)
else:
return xpCtx.flattenSequence(xpCtx.evaluate(self.selectProg, None))
aspectNodeAspectCovered = {"conceptAspect": Aspect.CONCEPT,
"unitAspect": Aspect.UNIT,
"entityIdentifierAspect": Aspect.ENTITY_IDENTIFIER,
"periodAspect": Aspect.PERIOD}
class ModelFilterDefinitionNode(ModelOpenDefinitionNode):
def init(self, modelDocument):
super(ModelFilterDefinitionNode, self).init(modelDocument)
@property
def descendantArcroles(self):
return (XbrlConst.tableAspectNodeFilter, XbrlConst.tableAspectNodeFilterMMDD, XbrlConst.tableAspectNodeFilter201305, XbrlConst.tableFilterNodeFilter2011, XbrlConst.tableAxisFilter2011,XbrlConst.tableAxisFilter201205, XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011,
XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011, XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableAxisMessage2011)
@property
def filterRelationships(self):
try:
return self._filterRelationships
except AttributeError:
rels = [] # order so conceptName filter is first (if any) (may want more sorting in future)
for rel in self.modelXbrl.relationshipSet((XbrlConst.tableAspectNodeFilter, XbrlConst.tableAspectNodeFilterMMDD, XbrlConst.tableAspectNodeFilter201305, XbrlConst.tableFilterNodeFilter2011, XbrlConst.tableAxisFilter2011,XbrlConst.tableAxisFilter201205)).fromModelObject(self):
if isinstance(rel.toModelObject, ModelConceptName):
rels.insert(0, rel) # put conceptName filters first
else:
rels.append(rel)
self._filterRelationships = rels
return rels
def hasAspect(self, structuralNode, aspect):
return aspect in self.aspectsCovered()
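    # Covered aspects come either from the post-2013-05-17 aspectNode child element or,
    # for the older filter-node form, from the covered filters reached via filterRelationships.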
def aspectsCovered(self, varBinding=None):
try:
return self._aspectsCovered
except AttributeError:
self._aspectsCovered = set()
self._dimensionsCovered = set()
self.includeUnreportedValue = False
if self.localName == "aspectNode": # after 2-13-05-17
aspectElt = XmlUtil.child(self, self.namespaceURI, ("conceptAspect", "unitAspect", "entityIdentifierAspect", "periodAspect", "dimensionAspect"))
if aspectElt is not None:
if aspectElt.localName == "dimensionAspect":
dimQname = qname(aspectElt, aspectElt.textValue)
self._aspectsCovered.add(dimQname)
self._aspectsCovered.add(Aspect.DIMENSIONS)
self._dimensionsCovered.add(dimQname)
self.includeUnreportedValue = aspectElt.get("includeUnreportedValue") in ("true", "1")
else:
self._aspectsCovered.add(aspectNodeAspectCovered[aspectElt.localName])
else:
# filter node (prior to 2013-05-17)
for rel in self.filterRelationships:
if rel.isCovered:
_filter = rel.toModelObject
self._aspectsCovered |= _filter.aspectsCovered(varBinding)
self._dimensionsCovered = set(aspect for aspect in self._aspectsCovered if isinstance(aspect,QName))
if self._dimensionsCovered:
self._aspectsCovered.add(Aspect.DIMENSIONS)
return self._aspectsCovered
def aspectValue(self, xpCtx, aspect, inherit=None):
if aspect == Aspect.DIMENSIONS:
return self._dimensionsCovered
# does not apply to filter, value can only come from a bound fact
return None
def filteredFactsPartitions(self, xpCtx, facts):
filteredFacts = formulaEvaluatorFilterFacts(xpCtx, VariableBinding(xpCtx),
facts, self.filterRelationships, None)
if not self.includeUnreportedValue:
            # keep only facts that report a value for every covered dimension
reportedAspectFacts = set()
for fact in filteredFacts:
if all(fact.context is not None and
isinstance(fact.context.dimValue(dimAspect), ModelDimensionValue)
for dimAspect in self._dimensionsCovered):
reportedAspectFacts.add(fact)
else:
reportedAspectFacts = filteredFacts
return factsPartitions(xpCtx, reportedAspectFacts, self.aspectsCovered())
@property
def propertyView(self):
return ((("id", self.id),
("aspect", ", ".join(aspectStr(aspect)
for aspect in self.aspectsCovered()
if aspect != Aspect.DIMENSIONS)),
("definition", self.definitionNodeView)) +
self.definitionLabelsView)
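# Register which model class the object factory instantiates for each table linkbase
# element QName, spanning the IWD/MMDD, 2013 PWD, 2011 Montreal and Eurofiling vocabularies.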
from arelle.ModelObjectFactory import elementSubstitutionModelClass
elementSubstitutionModelClass.update((
# IWD
(XbrlConst.qnTableTableMMDD, ModelTable),
(XbrlConst.qnTableBreakdownMMDD, ModelBreakdown),
(XbrlConst.qnTableRuleSetMMDD, ModelRuleSet),
(XbrlConst.qnTableRuleNodeMMDD, ModelRuleDefinitionNode),
(XbrlConst.qnTableConceptRelationshipNodeMMDD, ModelConceptRelationshipDefinitionNode),
(XbrlConst.qnTableDimensionRelationshipNodeMMDD, ModelDimensionRelationshipDefinitionNode),
(XbrlConst.qnTableAspectNodeMMDD, ModelFilterDefinitionNode),
# PWD 2013-08-28
(XbrlConst.qnTableTable, ModelTable),
(XbrlConst.qnTableBreakdown, ModelBreakdown),
(XbrlConst.qnTableRuleSet, ModelRuleSet),
(XbrlConst.qnTableRuleNode, ModelRuleDefinitionNode),
(XbrlConst.qnTableConceptRelationshipNode, ModelConceptRelationshipDefinitionNode),
(XbrlConst.qnTableDimensionRelationshipNode, ModelDimensionRelationshipDefinitionNode),
(XbrlConst.qnTableAspectNode, ModelFilterDefinitionNode),
# PWD 2013-05-17
(XbrlConst.qnTableTable201305, ModelTable),
(XbrlConst.qnTableBreakdown201305, ModelBreakdown),
(XbrlConst.qnTableRuleNode201305, ModelRuleDefinitionNode),
(XbrlConst.qnTableConceptRelationshipNode201305, ModelConceptRelationshipDefinitionNode),
(XbrlConst.qnTableDimensionRelationshipNode201305, ModelDimensionRelationshipDefinitionNode),
(XbrlConst.qnTableAspectNode201305, ModelFilterDefinitionNode),
# PWD 2013-01-17
(XbrlConst.qnTableTable201301, ModelTable),
(XbrlConst.qnTableRuleNode201301, ModelRuleDefinitionNode),
(XbrlConst.qnTableCompositionNode201301, ModelCompositionDefinitionNode),
(XbrlConst.qnTableConceptRelationshipNode201301, ModelConceptRelationshipDefinitionNode),
(XbrlConst.qnTableDimensionRelationshipNode201301, ModelDimensionRelationshipDefinitionNode),
(XbrlConst.qnTableSelectionNode201301, ModelSelectionDefinitionNode),
(XbrlConst.qnTableFilterNode201301, ModelFilterDefinitionNode),
(XbrlConst.qnTableTupleNode201301, ModelTupleDefinitionNode),
# PWD 2011 Montreal
(XbrlConst.qnTableTable2011, ModelTable),
(XbrlConst.qnTableRuleAxis2011, ModelRuleDefinitionNode),
(XbrlConst.qnTableCompositionAxis2011, ModelCompositionDefinitionNode),
(XbrlConst.qnTableConceptRelationshipAxis2011, ModelConceptRelationshipDefinitionNode),
(XbrlConst.qnTableSelectionAxis2011, ModelSelectionDefinitionNode),
(XbrlConst.qnTableFilterAxis2011, ModelFilterDefinitionNode),
(XbrlConst.qnTableTupleAxis2011, ModelTupleDefinitionNode),
(XbrlConst.qnTableDimensionRelationshipAxis2011, ModelDimensionRelationshipDefinitionNode),
# Eurofiling
(XbrlConst.qnEuTable, ModelEuTable),
(XbrlConst.qnEuAxisCoord, ModelEuAxisCoord),
))
# import after other modules resolved to prevent circular references
from arelle.FunctionXfi import concept_relationships
|
|
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import hashlib
from binascii import hexlify, unhexlify
import pytest
from nacl import bindings as c
from nacl.exceptions import CryptoError
def tohex(b):
return hexlify(b).decode("ascii")
def test_hash():
msg = b"message"
h1 = c.crypto_hash(msg)
assert len(h1) == c.crypto_hash_BYTES
assert tohex(h1) == ("f8daf57a3347cc4d6b9d575b31fe6077"
"e2cb487f60a96233c08cb479dbf31538"
"cc915ec6d48bdbaa96ddc1a16db4f4f9"
"6f37276cfcb3510b8246241770d5952c")
assert tohex(h1) == hashlib.sha512(msg).hexdigest()
h2 = c.crypto_hash_sha512(msg)
assert len(h2) == c.crypto_hash_sha512_BYTES
assert tohex(h2) == tohex(h1)
h3 = c.crypto_hash_sha256(msg)
assert len(h3) == c.crypto_hash_sha256_BYTES
assert tohex(h3) == ("ab530a13e45914982b79f9b7e3fba994"
"cfd1f3fb22f71cea1afbf02b460c6d1d")
assert tohex(h3) == hashlib.sha256(msg).hexdigest()
def test_secretbox():
key = b"\x00" * c.crypto_secretbox_KEYBYTES
msg = b"message"
nonce = b"\x01" * c.crypto_secretbox_NONCEBYTES
ct = c.crypto_secretbox(msg, nonce, key)
assert len(ct) == len(msg) + c.crypto_secretbox_BOXZEROBYTES
assert tohex(ct) == "3ae84dfb89728737bd6e2c8cacbaf8af3d34cc1666533a"
msg2 = c.crypto_secretbox_open(ct, nonce, key)
assert msg2 == msg
with pytest.raises(CryptoError):
c.crypto_secretbox_open(
msg + b"!",
nonce,
key,
)
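# AES256-GCM in libsodium requires hardware AES support, so the round-trip and the
# tamper check only run when crypto_aead_aes256gcm_is_available() reports True.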
def test_aes256gcm():
key = b"\x00" * c.crypto_aead_aes256gcm_KEYBYTES
msg = b"message"
nonce = b"\x01" * c.crypto_aead_aes256gcm_NPUBBYTES
if c.crypto_aead_aes256gcm_is_available():
ciphertext = c.crypto_aead_aes256gcm_encrypt(msg, nonce, key, None, 0)
cipher = ciphertext[0:len(msg)]
tag = ciphertext[len(msg):]
msg2 = c.crypto_aead_aes256gcm_decrypt(
cipher, tag, nonce,
key, None, 0)
assert msg2[:] == msg
with pytest.raises(CryptoError):
c.crypto_aead_aes256gcm_decrypt(
msg + b"!",
tag,
nonce,
key,
None,
0
)
def test_secretbox_wrong_length():
with pytest.raises(ValueError):
c.crypto_secretbox(b"", b"", b"")
with pytest.raises(ValueError):
c.crypto_secretbox(b"", b"", b"\x00" * c.crypto_secretbox_KEYBYTES)
with pytest.raises(ValueError):
c.crypto_secretbox_open(b"", b"", b"")
with pytest.raises(ValueError):
c.crypto_secretbox_open(
b"", b"", b"\x00" * c.crypto_secretbox_KEYBYTES)
def test_box():
A_pubkey, A_secretkey = c.crypto_box_keypair()
assert len(A_secretkey) == c.crypto_box_SECRETKEYBYTES
assert len(A_pubkey) == c.crypto_box_PUBLICKEYBYTES
B_pubkey, B_secretkey = c.crypto_box_keypair()
k1 = c.crypto_box_beforenm(B_pubkey, A_secretkey)
assert len(k1) == c.crypto_box_BEFORENMBYTES
k2 = c.crypto_box_beforenm(A_pubkey, B_secretkey)
assert tohex(k1) == tohex(k2)
message = b"message"
nonce = b"\x01" * c.crypto_box_NONCEBYTES
ct1 = c.crypto_box_afternm(message, nonce, k1)
assert len(ct1) == len(message) + c.crypto_box_BOXZEROBYTES
ct2 = c.crypto_box(message, nonce, B_pubkey, A_secretkey)
assert tohex(ct2) == tohex(ct1)
m1 = c.crypto_box_open(ct1, nonce, A_pubkey, B_secretkey)
assert m1 == message
m2 = c.crypto_box_open_afternm(ct1, nonce, k1)
assert m2 == message
with pytest.raises(CryptoError):
c.crypto_box_open(
message + b"!", nonce, A_pubkey, A_secretkey)
def test_box_wrong_lengths():
A_pubkey, A_secretkey = c.crypto_box_keypair()
with pytest.raises(ValueError):
c.crypto_box(b"abc", "\x00", A_pubkey, A_secretkey)
with pytest.raises(ValueError):
c.crypto_box(
b"abc", "\x00" * c.crypto_box_NONCEBYTES, b"", A_secretkey)
with pytest.raises(ValueError):
c.crypto_box(
b"abc", "\x00" * c.crypto_box_NONCEBYTES, A_pubkey, b"")
with pytest.raises(ValueError):
c.crypto_box_open(b"", b"", b"", b"")
with pytest.raises(ValueError):
c.crypto_box_open(b"", "\x00" * c.crypto_box_NONCEBYTES, b"", b"")
with pytest.raises(ValueError):
c.crypto_box_open(b"", "\x00" * c.crypto_box_NONCEBYTES, A_pubkey, b"")
with pytest.raises(ValueError):
c.crypto_box_beforenm(b"", b"")
with pytest.raises(ValueError):
c.crypto_box_beforenm(A_pubkey, b"")
with pytest.raises(ValueError):
c.crypto_box_afternm(b"", b"", b"")
with pytest.raises(ValueError):
c.crypto_box_afternm(b"", b"\x00" * c.crypto_box_NONCEBYTES, b"")
with pytest.raises(ValueError):
c.crypto_box_open_afternm(b"", b"", b"")
with pytest.raises(ValueError):
c.crypto_box_open_afternm(b"", b"\x00" * c.crypto_box_NONCEBYTES, b"")
def test_sign():
seed = b"\x00" * c.crypto_sign_SEEDBYTES
pubkey, secretkey = c.crypto_sign_seed_keypair(seed)
assert len(pubkey) == c.crypto_sign_PUBLICKEYBYTES
assert len(secretkey) == c.crypto_sign_SECRETKEYBYTES
pubkey, secretkey = c.crypto_sign_keypair()
assert len(pubkey) == c.crypto_sign_PUBLICKEYBYTES
assert len(secretkey) == c.crypto_sign_SECRETKEYBYTES
msg = b"message"
sigmsg = c.crypto_sign(msg, secretkey)
assert len(sigmsg) == len(msg) + c.crypto_sign_BYTES
msg2 = c.crypto_sign_open(sigmsg, pubkey)
assert msg2 == msg
def test_sign_wrong_lengths():
with pytest.raises(ValueError):
c.crypto_sign_seed_keypair(b"")
def secret_scalar():
pubkey, secretkey = c.crypto_box_keypair()
assert len(secretkey) == c.crypto_box_SECRETKEYBYTES
assert c.crypto_box_SECRETKEYBYTES == c.crypto_scalarmult_BYTES
return secretkey, pubkey
def test_scalarmult():
x, xpub = secret_scalar()
assert len(x) == 32
y, ypub = secret_scalar()
# the Curve25519 base point (generator)
base = unhexlify(b"09" + b"00" * 31)
bx1 = c.crypto_scalarmult_base(x)
bx2 = c.crypto_scalarmult(x, base)
assert tohex(bx1) == tohex(bx2)
assert tohex(bx1) == tohex(xpub)
xby = c.crypto_scalarmult(x, c.crypto_scalarmult_base(y))
ybx = c.crypto_scalarmult(y, c.crypto_scalarmult_base(x))
assert tohex(xby) == tohex(ybx)
z = unhexlify(b"10" * 32)
bz1 = c.crypto_scalarmult_base(z)
assert tohex(bz1) == ("781faab908430150daccdd6f9d6c5086"
"e34f73a93ebbaa271765e5036edfc519")
bz2 = c.crypto_scalarmult(z, base)
assert tohex(bz1) == tohex(bz2)
def test_sign_test_key_conversion():
"""
Taken from test vectors in libsodium
"""
keypair_seed = unhexlify(b"421151a459faeade3d247115f94aedae"
b"42318124095afabe4d1451a559faedee")
ed25519_pk, ed25519_sk = c.crypto_sign_seed_keypair(keypair_seed)
curve25519_pk = c.crypto_sign_ed25519_pk_to_curve25519(ed25519_pk)
with pytest.raises(ValueError):
c.crypto_sign_ed25519_pk_to_curve25519(unhexlify(b"12"))
with pytest.raises(ValueError):
c.crypto_sign_ed25519_sk_to_curve25519(unhexlify(b"12"))
curve25519_sk = c.crypto_sign_ed25519_sk_to_curve25519(ed25519_sk)
assert tohex(curve25519_pk) == ("f1814f0e8ff1043d8a44d25babff3ced"
"cae6c22c3edaa48f857ae70de2baae50")
assert tohex(curve25519_sk) == ("8052030376d47112be7f73ed7a019293"
"dd12ad910b654455798b4667d73de166")
|
|
""" Cisco_IOS_XR_crypto_sam_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR crypto\-sam package configuration.
This module contains definitions
for the following management objects\:
crypto\: Crypto configuration
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class CryptoSamActionEnum(Enum):
"""
CryptoSamActionEnum
Crypto sam action
.. data:: PROCEED = 1
To respond YES to the SAM prompt
.. data:: TERMINATE = 2
To respond NO to the SAM prompt
"""
PROCEED = 1
TERMINATE = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['CryptoSamActionEnum']
class Crypto(object):
"""
Crypto configuration
.. attribute:: sam
Software Authentication Manager (SAM) Config
**type**\: :py:class:`Sam <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Sam>`
.. attribute:: ssh
Secure Shell configuration
**type**\: :py:class:`Ssh <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Ssh>`
"""
_prefix = 'crypto-sam-cfg'
_revision = '2015-01-07'
def __init__(self):
self.sam = Crypto.Sam()
self.sam.parent = self
self.ssh = Crypto.Ssh()
self.ssh.parent = self
class Sam(object):
"""
Software Authentication Manager (SAM) Config
.. attribute:: prompt_interval
Set prompt interval at reboot time
**type**\: :py:class:`PromptInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Sam.PromptInterval>`
"""
_prefix = 'crypto-sam-cfg'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.prompt_interval = None
class PromptInterval(object):
"""
Set prompt interval at reboot time
.. attribute:: action
Respond to SAM prompt either Proceed/Terminate
**type**\: :py:class:`CryptoSamActionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.CryptoSamActionEnum>`
**mandatory**\: True
.. attribute:: prompt_time
Prompt time from 0 \- 300 seconds
**type**\: int
**range:** 0..300
**mandatory**\: True
.. attribute:: _is_presence
Is present if this instance represents presence container else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'crypto-sam-cfg'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self._is_presence = True
self.action = None
self.prompt_time = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-sam-cfg:sam/Cisco-IOS-XR-crypto-sam-cfg:prompt-interval'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self._is_presence:
return True
if self.action is not None:
return True
if self.prompt_time is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Sam.PromptInterval']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-sam-cfg:sam'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.prompt_interval is not None and self.prompt_interval._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Sam']['meta_info']
class Ssh(object):
"""
Secure Shell configuration
.. attribute:: client
Provide SSH client service
**type**\: :py:class:`Client <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Ssh.Client>`
.. attribute:: server
Provide SSH server service
**type**\: :py:class:`Server <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Ssh.Server>`
"""
_prefix = 'crypto-ssh-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.client = Crypto.Ssh.Client()
self.client.parent = self
self.server = Crypto.Ssh.Server()
self.server.parent = self
class Client(object):
"""
Provide SSH client service
.. attribute:: client_vrf
Source interface VRF for ssh client sessions
**type**\: str
**range:** 0..32
.. attribute:: dscp
Cisco sshd DSCP value
**type**\: int
**range:** 0..63
.. attribute:: host_public_key
Filename \- where to store known host file
**type**\: str
.. attribute:: source_interface
Source interface for ssh client sessions
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
"""
_prefix = 'crypto-ssh-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.client_vrf = None
self.dscp = None
self.host_public_key = None
self.source_interface = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-ssh-cfg:ssh/Cisco-IOS-XR-crypto-ssh-cfg:client'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.client_vrf is not None:
return True
if self.dscp is not None:
return True
if self.host_public_key is not None:
return True
if self.source_interface is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Ssh.Client']['meta_info']
class Server(object):
"""
Provide SSH server service
.. attribute:: dscp
Cisco sshd DSCP value
**type**\: int
**range:** 0..63
.. attribute:: logging
Enable ssh server logging
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: netconf
port number on which ssh service to be started for netconf
**type**\: int
**range:** 1..65535
.. attribute:: netconf_vrf_table
Cisco sshd Netconf VRF name
**type**\: :py:class:`NetconfVrfTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Ssh.Server.NetconfVrfTable>`
.. attribute:: rate_limit
Cisco sshd rate\-limit of service requests
**type**\: int
**range:** 1..600
.. attribute:: session_limit
Cisco sshd session\-limit of service requests
**type**\: int
**range:** 1..1024
.. attribute:: timeout
            Timeout value between 5\-120 seconds, default 30
**type**\: int
**range:** 5..120
.. attribute:: v2
Cisco sshd force protocol version 2 only
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: vrf_table
Cisco sshd VRF name
**type**\: :py:class:`VrfTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Ssh.Server.VrfTable>`
"""
_prefix = 'crypto-ssh-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.dscp = None
self.logging = None
self.netconf = None
self.netconf_vrf_table = Crypto.Ssh.Server.NetconfVrfTable()
self.netconf_vrf_table.parent = self
self.rate_limit = None
self.session_limit = None
self.timeout = None
self.v2 = None
self.vrf_table = Crypto.Ssh.Server.VrfTable()
self.vrf_table.parent = self
class VrfTable(object):
"""
Cisco sshd VRF name
.. attribute:: vrf
Enter VRF name
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Ssh.Server.VrfTable.Vrf>`
"""
_prefix = 'crypto-ssh-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.vrf = YList()
self.vrf.parent = self
self.vrf.name = 'vrf'
class Vrf(object):
"""
Enter VRF name
.. attribute:: vrf_name <key>
Enter VRF name
**type**\: str
**range:** 0..32
.. attribute:: enable
Enable to use VRF
**type**\: :py:class:`Empty <ydk.types.Empty>`
**mandatory**\: True
.. attribute:: ipv4_access_list
SSH v4 access\-list name
**type**\: str
**range:** 0..32
.. attribute:: ipv6_access_list
SSH v6 access\-list name
**type**\: str
**range:** 0..32
"""
_prefix = 'crypto-ssh-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.vrf_name = None
self.enable = None
self.ipv4_access_list = None
self.ipv6_access_list = None
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-ssh-cfg:ssh/Cisco-IOS-XR-crypto-ssh-cfg:server/Cisco-IOS-XR-crypto-ssh-cfg:vrf-table/Cisco-IOS-XR-crypto-ssh-cfg:vrf[Cisco-IOS-XR-crypto-ssh-cfg:vrf-name = ' + str(self.vrf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.vrf_name is not None:
return True
if self.enable is not None:
return True
if self.ipv4_access_list is not None:
return True
if self.ipv6_access_list is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Ssh.Server.VrfTable.Vrf']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-ssh-cfg:ssh/Cisco-IOS-XR-crypto-ssh-cfg:server/Cisco-IOS-XR-crypto-ssh-cfg:vrf-table'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.vrf is not None:
for child_ref in self.vrf:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Ssh.Server.VrfTable']['meta_info']
class NetconfVrfTable(object):
"""
Cisco sshd Netconf VRF name
.. attribute:: vrf
Enter VRF name
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg.Crypto.Ssh.Server.NetconfVrfTable.Vrf>`
"""
_prefix = 'crypto-ssh-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.vrf = YList()
self.vrf.parent = self
self.vrf.name = 'vrf'
class Vrf(object):
"""
Enter VRF name
.. attribute:: vrf_name <key>
Enter VRF name
**type**\: str
**range:** 0..32
.. attribute:: enable
Enable to use VRF
**type**\: :py:class:`Empty <ydk.types.Empty>`
**mandatory**\: True
.. attribute:: ipv4_access_list
SSH v4 access\-list name
**type**\: str
**range:** 0..32
.. attribute:: ipv6_access_list
SSH v6 access\-list name
**type**\: str
**range:** 0..32
"""
_prefix = 'crypto-ssh-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.vrf_name = None
self.enable = None
self.ipv4_access_list = None
self.ipv6_access_list = None
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-ssh-cfg:ssh/Cisco-IOS-XR-crypto-ssh-cfg:server/Cisco-IOS-XR-crypto-ssh-cfg:netconf-vrf-table/Cisco-IOS-XR-crypto-ssh-cfg:vrf[Cisco-IOS-XR-crypto-ssh-cfg:vrf-name = ' + str(self.vrf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.vrf_name is not None:
return True
if self.enable is not None:
return True
if self.ipv4_access_list is not None:
return True
if self.ipv6_access_list is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Ssh.Server.NetconfVrfTable.Vrf']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-ssh-cfg:ssh/Cisco-IOS-XR-crypto-ssh-cfg:server/Cisco-IOS-XR-crypto-ssh-cfg:netconf-vrf-table'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.vrf is not None:
for child_ref in self.vrf:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Ssh.Server.NetconfVrfTable']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-ssh-cfg:ssh/Cisco-IOS-XR-crypto-ssh-cfg:server'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.dscp is not None:
return True
if self.logging is not None:
return True
if self.netconf is not None:
return True
if self.netconf_vrf_table is not None and self.netconf_vrf_table._has_data():
return True
if self.rate_limit is not None:
return True
if self.session_limit is not None:
return True
if self.timeout is not None:
return True
if self.v2 is not None:
return True
if self.vrf_table is not None and self.vrf_table._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Ssh.Server']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto/Cisco-IOS-XR-crypto-ssh-cfg:ssh'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.client is not None and self.client._has_data():
return True
if self.server is not None and self.server._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto.Ssh']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-cfg:crypto'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.sam is not None and self.sam._has_data():
return True
if self.ssh is not None and self.ssh._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_cfg as meta
return meta._meta_table['Crypto']['meta_info']
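# Illustrative usage sketch (not part of the generated bindings): shows how the
# classes above are typically populated before being pushed to a device with a
# YDK service such as CRUDService. The values assigned below are hypothetical.
def _example_build_crypto_cfg():
    crypto = Crypto()
    # The SAM prompt-interval is a presence container, so it must be instantiated.
    crypto.sam.prompt_interval = Crypto.Sam.PromptInterval()
    crypto.sam.prompt_interval.action = CryptoSamActionEnum.PROCEED
    crypto.sam.prompt_interval.prompt_time = 30
    # SSH server leafs plus one entry in the keyed VRF list.
    crypto.ssh.server.timeout = 60
    crypto.ssh.server.v2 = Empty()
    vrf = Crypto.Ssh.Server.VrfTable.Vrf()
    vrf.vrf_name = 'default'
    vrf.enable = Empty()
    crypto.ssh.server.vrf_table.vrf.append(vrf)
    return crypto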
|
|
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections
import copy
import functools
import itertools
from os import path
import jsonschema
import six
import yaml
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.plugins import exceptions as p_ex
from sahara.utils import files
def transform_exception(from_type, to_type, transform_func=None):
"""Decorator to transform exception types.
:param from_type: The type of exception to catch and transform.
:param to_type: The type of exception to raise instead.
:param transform_func: A function to transform from_type into
to_type, which must be of the form func(exc, to_type).
Defaults to:
lambda exc, new_type: new_type(exc.message)
"""
if not transform_func:
transform_func = lambda exc, new_type: new_type(exc.message)
def decorator(func):
@functools.wraps(func)
def handler(*args, **kwargs):
try:
func(*args, **kwargs)
except from_type as exc:
raise transform_func(exc, to_type)
return handler
return decorator
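# A minimal usage sketch of transform_exception; the decorated helper below is
# hypothetical and not part of this module. A RemoteCommandException raised by
# the remote call is re-raised as an ImageValidationError.
@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)
def _example_touch_marker(remote):
    # run_as_root mirrors the _sudo helper defined at the bottom of this module
    remote.execute_command("touch /tmp/.sahara_validated", run_as_root=True)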
def validate_instance(instance, validators, reconcile=True, **kwargs):
"""Runs all validators against the specified instance.
:param instance: An instance to validate.
:param validators: A sequence of ImageValidators.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:raises ImageValidationError: If validation fails.
"""
with instance.remote() as remote:
for validator in validators:
validator.validate(remote, reconcile=reconcile, **kwargs)
class ImageArgument(object):
"""An argument used by an image manifest."""
SPEC_SCHEMA = {
"type": "object",
"items": {
"type": "object",
"properties": {
"target_variable": {
"type": "string",
"minLength": 1
},
"description": {
"type": "string",
"minLength": 1
},
"default": {
"type": "string",
"minLength": 1
},
"required": {
"type": "boolean",
"minLength": 1
},
"choices": {
"type": "array",
"minLength": 1,
"items": {
"type": "string"
}
}
}
}
}
@classmethod
def from_spec(cls, spec):
"""Constructs and returns a set of arguments from a specification.
:param spec: The specification for the argument set.
:return A dict of arguments built to the specification.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
arguments = {name: cls(name,
arg.get('description'),
arg.get('default'),
arg.get('required'),
arg.get('choices'))
for name, arg in six.iteritems(spec)}
reserved_names = ['distro', 'reconcile']
for name, arg in six.iteritems(arguments):
if name in reserved_names:
raise p_ex.ImageValidationSpecificationError(
                    _("The following argument names are reserved: "
                      "{names}").format(names=reserved_names))
if not arg.default and not arg.required:
raise p_ex.ImageValidationSpecificationError(
                    _("Argument {name} is not required and therefore must "
                      "specify a default value.").format(name=arg.name))
if arg.choices and arg.default and arg.default not in arg.choices:
raise p_ex.ImageValidationSpecificationError(
_("Argument {name} specifies a default which is not one "
"of its choices.").format(name=arg.name))
return arguments
def __init__(self, name, description=None, default=None, required=False,
choices=None):
self.name = name
self.description = description
self.default = default
self.required = required
self.choices = choices
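# Illustrative only: an argument specification of the shape that
# ImageArgument.from_spec accepts. The argument name and values here are
# hypothetical, not defined by any real plugin.
_EXAMPLE_ARGUMENT_SPEC = {
    "java_distro": {
        "description": "The Java distribution to install.",
        "default": "openjdk",
        "required": False,
        "choices": ["openjdk", "oracle-java"]
    }
}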
@six.add_metaclass(abc.ABCMeta)
class ImageValidator(object):
"""Validates the image spawned to an instance via a set of rules."""
@abc.abstractmethod
def validate(self, remote, reconcile=True, **kwargs):
"""Validates the image.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:raises ImageValidationError: If validation fails.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class SaharaImageValidatorBase(ImageValidator):
"""Base class for Sahara's native image validation."""
DISTRO_KEY = 'distro'
RECONCILE_KEY = 'reconcile'
ORDERED_VALIDATORS_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"minProperties": 1,
"maxProperties": 1
}
}
_DISTRO_FAMILES = {
'centos': 'redhat',
'centos7': 'redhat',
'fedora': 'redhat',
'redhatenterpriseserver': 'redhat',
'ubuntu': 'debian'
}
@staticmethod
def get_validator_map(custom_validator_map=None):
"""Gets the map of validator name token to validator class.
:param custom_validator_map: A map of validator names and classes to
add to the ones Sahara provides by default. These will take
precedence over the base validators in case of key overlap.
:return A map of validator names and classes.
"""
default_validator_map = {
'package': SaharaPackageValidator,
'script': SaharaScriptValidator,
'any': SaharaAnyValidator,
'all': SaharaAllValidator,
'os_case': SaharaOSCaseValidator,
'argument_case': SaharaArgumentCaseValidator,
'argument_set': SaharaArgumentSetterValidator,
}
if custom_validator_map:
default_validator_map.update(custom_validator_map)
return default_validator_map
@classmethod
def from_yaml(cls, yaml_path, validator_map=None, resource_roots=None):
"""Constructs and returns a validator from the provided yaml file.
:param yaml_path: The relative path to a yaml file.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return A SaharaImageValidator built to the yaml specification.
"""
validator_map = validator_map or {}
resource_roots = resource_roots or []
file_text = files.get_file_text(yaml_path)
spec = yaml.safe_load(file_text)
validator_map = cls.get_validator_map(validator_map)
return cls.from_spec(spec, validator_map, resource_roots)
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Constructs and returns a validator from a specification object.
:param spec: The specification for the validator.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return A validator built to the specification.
"""
pass
@classmethod
def from_spec_list(cls, specs, validator_map, resource_roots):
"""Constructs a list of validators from a list of specifications.
:param specs: A list of validator specifications, each of which
will be a dict of size 1, where the key represents the validator
            type and the value represents its specification.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A list of validators.
"""
validators = []
for spec in specs:
validator_class, validator_spec = cls.get_class_from_spec(
spec, validator_map)
validators.append(validator_class.from_spec(
validator_spec, validator_map, resource_roots))
return validators
@classmethod
def get_class_from_spec(cls, spec, validator_map):
"""Gets the class and specification from a validator dict.
:param spec: A validator specification including its type: a dict of
size 1, where the key represents the validator type and the value
            represents its configuration.
:param validator_map: A map of validator name to class.
:return: A tuple of validator class and configuration.
"""
key, value = list(six.iteritems(spec))[0]
validator_class = validator_map.get(key, None)
if not validator_class:
raise p_ex.ImageValidationSpecificationError(
                _("Validator type %s not found.") % key)
return validator_class, value
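# An illustrative image specification of the shape that from_yaml loads and
# from_spec consumes; the argument, package names and script path below are
# hypothetical:
#
#   arguments:
#     java_distro:
#       description: The Java distribution to install.
#       default: openjdk
#       required: false
#   validators:
#     - os_case:
#         - ubuntu:
#             - package: openjdk-7-jre
#         - redhat:
#             - package: java-1.7.0-openjdk
#     - script: common/setup_java.sh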
class ValidationAttemptFailed(object):
"""An object representing a failed validation attempt.
Primarily for use by the SaharaAnyValidator, which must aggregate
failures for error exposition purposes.
"""
def __init__(self, exception):
self.exception = exception
def __bool__(self):
return False
def __nonzero__(self):
return False
def try_validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate, but returns rather than raising on failure.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:return True if successful, ValidationAttemptFailed object if failed.
"""
try:
self.validate(
remote, reconcile=reconcile,
image_arguments=image_arguments, **kwargs)
return True
except p_ex.ImageValidationError as exc:
return self.ValidationAttemptFailed(exc)
class SaharaImageValidator(SaharaImageValidatorBase):
"""The root of any tree of SaharaImageValidators.
This validator serves as the root of the tree for SaharaImageValidators,
and provides any needed initialization (such as distro retrieval.)
"""
SPEC_SCHEMA = {
"title": "SaharaImageValidator",
"type": "object",
"properties": {
"validators": SaharaImageValidatorBase.ORDERED_VALIDATORS_SCHEMA
},
"required": ["validators"]
}
def get_argument_list(self):
return [argument for name, argument
in six.iteritems(self.arguments)]
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Constructs and returns a validator from a specification object.
:param spec: The specification for the validator: a dict containing
the key "validators", which contains a list of validator
specifications.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return A SaharaImageValidator containing all specified validators.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
arguments_spec = spec.get('arguments', {})
arguments = ImageArgument.from_spec(arguments_spec)
validators_spec = spec['validators']
validator = SaharaAllValidator.from_spec(
validators_spec, validator_map, resource_roots)
return cls(validator, arguments)
def __init__(self, validator, arguments):
"""Constructor method.
:param validator: A SaharaAllValidator containing the specified
validators.
"""
self.validator = validator
self.validators = validator.validators
self.arguments = arguments
@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate the image.
Before deferring to contained validators, performs one-time setup
steps such as distro discovery.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
argument_values = {}
for name, argument in six.iteritems(self.arguments):
if name not in image_arguments:
if argument.required:
raise p_ex.ImageValidationError(
_("Argument {name} is required for image "
"processing.").format(name=name))
else:
argument_values[name] = argument.default
else:
value = image_arguments[name]
choices = argument.choices
if choices and value not in choices:
raise p_ex.ImageValidationError(
_("Value for argument {name} must be one of "
"{choices}.").format(name=name, choices=choices))
else:
argument_values[name] = value
argument_values[self.DISTRO_KEY] = remote.get_os_distrib()
self.validator.validate(remote, reconcile=reconcile,
image_arguments=argument_values)
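# Minimal end-to-end sketch (the yaml path and resource roots are hypothetical):
# build the root validator from a plugin resource and run it against an
# instance via validate_instance defined above.
def _example_validate(instance, image_arguments):
    validator = SaharaImageValidator.from_yaml(
        'plugins/example/resources/images/image.yaml',
        resource_roots=['plugins/example/resources/images'])
    validate_instance(instance, [validator], reconcile=True,
                      image_arguments=image_arguments)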
class SaharaPackageValidator(SaharaImageValidatorBase):
"""A validator that checks package installation state on the instance."""
class Package(object):
def __init__(self, name, version=None):
self.name = name
self.version = version
def __str__(self):
return ("%s-%s" % (self.name, self.version)
if self.version else self.name)
_SINGLE_PACKAGE_SCHEMA = {
"oneOf": [
{
"type": "object",
"minProperties": 1,
"maxProperties": 1,
"additionalProperties": {
"type": "object",
"properties": {
"version": {
"type": "string",
"minLength": 1
},
}
},
},
{
"type": "string",
"minLength": 1
}
]
}
SPEC_SCHEMA = {
"title": "SaharaPackageValidator",
"oneOf": [
_SINGLE_PACKAGE_SCHEMA,
{
"type": "array",
"items": _SINGLE_PACKAGE_SCHEMA,
"minLength": 1
}
]
}
@classmethod
def _package_from_spec(cls, spec):
"""Builds a single package object from a specification.
:param spec: May be a string or single-length dictionary of name to
configuration values.
:return: A package object.
"""
if isinstance(spec, six.string_types):
return cls.Package(spec, None)
else:
package, properties = list(six.iteritems(spec))[0]
version = properties.get('version', None)
return cls.Package(package, version)
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds a package validator from a specification.
:param spec: May be a string, a single-length dictionary of name to
configuration values, or a list containing any number of either or
both of the above. Configuration values may include:
version: The version of the package to check and/or install.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A validator that will check that the specified package or
packages are installed.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
packages = ([cls._package_from_spec(package_spec)
for package_spec in spec]
if isinstance(spec, list)
else [cls._package_from_spec(spec)])
return cls(packages)
def __init__(self, packages):
self.packages = packages
@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate package installation on the image.
Even if reconcile=True, attempts to verify previous package
installation offline before using networked tools to validate or
install new packages.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
        env_distro = image_arguments[self.DISTRO_KEY]
        env_family = self._DISTRO_FAMILES.get(env_distro)
        if not env_family:
            raise p_ex.ImageValidationError(
                _("Unknown distro: cannot verify or install packages."))
        check, install = self._DISTRO_TOOLS[env_family]
try:
check(self, remote)
except (ex.SubprocessException, ex.RemoteCommandException,
RuntimeError):
if reconcile:
install(self, remote)
check(self, remote)
else:
raise
def _dpkg_check(self, remote):
check_cmd = ("dpkg -s %s" %
" ".join(str(package) for package in self.packages))
return _sudo(remote, check_cmd)
def _rpm_check(self, remote):
check_cmd = ("rpm -q %s" %
" ".join(str(package) for package in self.packages))
return _sudo(remote, check_cmd)
def _yum_install(self, remote):
install_cmd = (
"yum install -y %s" %
" ".join(str(package) for package in self.packages))
_sudo(remote, install_cmd)
def _apt_install(self, remote):
install_cmd = (
"apt-get -y install %s" %
" ".join(str(package) for package in self.packages))
return _sudo(remote, install_cmd)
_DISTRO_TOOLS = {
"redhat": (_rpm_check, _yum_install),
"debian": (_dpkg_check, _apt_install)
}
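# Illustrative specs accepted by SaharaPackageValidator.from_spec (the package
# names are hypothetical): a bare name, a name pinned to a version, or a list
# mixing both:
#   "ntp"
#   {"java-1.7.0-openjdk": {"version": "1.7.0"}}
#   ["ntp", {"java-1.7.0-openjdk": {"version": "1.7.0"}}]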
class SaharaScriptValidator(SaharaImageValidatorBase):
"""A validator that runs a script on the instance."""
_DEFAULT_ENV_VARS = [SaharaImageValidatorBase.RECONCILE_KEY,
SaharaImageValidatorBase.DISTRO_KEY]
SPEC_SCHEMA = {
"title": "SaharaScriptValidator",
"oneOf": [
{
"type": "object",
"minProperties": 1,
"maxProperties": 1,
"additionalProperties": {
"type": "object",
"properties": {
"env_vars": {
"type": "array",
"items": {
"type": "string"
}
},
"output": {
"type": "string",
"minLength": 1
},
"inline": {
"type": "string",
"minLength": 1
}
},
}
},
{
"type": "string"
}
]
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds a script validator from a specification.
:param spec: May be a string or a single-length dictionary of name to
configuration values. Configuration values include:
env_vars: A list of environment variable names to send to the
script.
output: A key into which to put the stdout of the script in the
image_arguments of the validation run.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A validator that will run a script on the image.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
script_contents = None
if isinstance(spec, six.string_types):
script_path = spec
env_vars, output_var = cls._DEFAULT_ENV_VARS, None
else:
script_path, properties = list(six.iteritems(spec))[0]
env_vars = cls._DEFAULT_ENV_VARS + properties.get('env_vars', [])
output_var = properties.get('output', None)
script_contents = properties.get('inline')
if not script_contents:
for root in resource_roots:
file_path = path.join(root, script_path)
script_contents = files.try_get_file_text(file_path)
if script_contents:
break
if not script_contents:
raise p_ex.ImageValidationSpecificationError(
_("Script %s not found in any resource roots.") % script_path)
return SaharaScriptValidator(script_contents, env_vars, output_var)
def __init__(self, script_contents, env_vars=None, output_var=None):
"""Constructor method.
:param script_contents: A string representation of the script.
:param env_vars: A list of environment variables to send to the
script.
:param output_var: A key into which to put the stdout of the script in
the image_arguments of the validation run.
:return: A SaharaScriptValidator.
"""
self.script_contents = script_contents
self.env_vars = env_vars or []
self.output_var = output_var
@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate by running a script on the image.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
Note that the key SIV_RECONCILE will be set to 1 if the script
should reconcile and 0 otherwise; all scripts should act on this
input if possible. The key SIV_DISTRO will also contain the
distro representation, per `lsb_release -is`.
:raises ImageValidationError: If validation fails.
"""
arguments = copy.deepcopy(image_arguments)
arguments[self.RECONCILE_KEY] = 1 if reconcile else 0
script = "\n".join(["%(env_vars)s",
"bash <<_SIV_",
"%(script)s",
"_SIV_"])
env_vars = "\n".join("export %s=%s" % (key, value) for (key, value)
in six.iteritems(image_arguments)
if key in self.env_vars)
script = script % {"env_vars": env_vars,
"script": self.script_contents}
code, stdout = _sudo(remote, script)
if self.output_var:
image_arguments[self.output_var] = stdout
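# Illustrative specs accepted by SaharaScriptValidator.from_spec (the paths and
# variable names are hypothetical): either a bare resource path, or a path
# mapped to options:
#   "simple/check_java.sh"
#   {"common/setup_java.sh": {"env_vars": ["java_distro"],
#                             "output": "java_version"}}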
@six.add_metaclass(abc.ABCMeta)
class SaharaAggregateValidator(SaharaImageValidatorBase):
"""An abstract class representing an ordered list of other validators."""
SPEC_SCHEMA = SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds the aggregate validator from a specification.
:param spec: A list of validator definitions, each of which is a
single-length dictionary of name to configuration values.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: An aggregate validator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
validators = cls.from_spec_list(spec, validator_map, resource_roots)
return cls(validators)
def __init__(self, validators):
self.validators = validators
class SaharaAnyValidator(SaharaAggregateValidator):
"""A list of validators, only one of which must succeed."""
def _try_all(self, remote, reconcile=True,
image_arguments=None, **kwargs):
results = []
for validator in self.validators:
result = validator.try_validate(remote, reconcile=reconcile,
image_arguments=image_arguments,
**kwargs)
results.append(result)
if result:
break
return results
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate any of the contained validators.
Note that if reconcile=True, this validator will first run all
contained validators using reconcile=False, and succeed immediately
should any pass validation. If all fail, it will only then run them
using reconcile=True, and again succeed immediately should any pass.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
results = self._try_all(remote, reconcile=False,
image_arguments=image_arguments)
if reconcile and not any(results):
results = self._try_all(remote, reconcile=True,
image_arguments=image_arguments)
if not any(results):
raise p_ex.AllValidationsFailedError(result.exception for result
in results)
class SaharaAllValidator(SaharaAggregateValidator):
"""A list of validators, all of which must succeed."""
def validate(self, remote, reconcile=True, image_arguments=None, **kwargs):
"""Attempts to validate all of the contained validators.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
for validator in self.validators:
validator.validate(remote, reconcile=reconcile,
image_arguments=image_arguments)
class SaharaOSCaseValidator(SaharaImageValidatorBase):
"""A validator which will take different actions depending on distro."""
_distro_tuple = collections.namedtuple('Distro', ['distro', 'validator'])
SPEC_SCHEMA = {
"type": "array",
"minLength": 1,
"items": {
"type": "object",
"minProperties": 1,
"maxProperties": 1,
"additionalProperties":
SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA,
}
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds an os_case validator from a specification.
:param spec: A list of single-length dictionaries. The key of each is
a distro or family name and the value under each key is a list of
validators (all of which must succeed.)
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A SaharaOSCaseValidator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
distros = itertools.chain(*(six.iteritems(distro_spec)
for distro_spec in spec))
distros = [
cls._distro_tuple(key, SaharaAllValidator.from_spec(
value, validator_map, resource_roots))
for (key, value) in distros]
return cls(distros)
def __init__(self, distros):
"""Constructor method.
:param distros: A list of distro tuples (distro, list of validators).
"""
self.distros = distros
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate depending on distro.
May match the OS by specific distro or by family (centos may match
"centos" or "redhat", for instance.) If multiple keys match the
distro, only the validators under the first matched key will be run.
If no keys match, no validators are run, and validation proceeds.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
env_distro = image_arguments[self.DISTRO_KEY]
family = self._DISTRO_FAMILES.get(env_distro)
matches = {env_distro, family} if family else {env_distro}
for distro, validator in self.distros:
if distro in matches:
validator.validate(
remote, reconcile=reconcile,
image_arguments=image_arguments)
break
class SaharaArgumentCaseValidator(SaharaImageValidatorBase):
    """A validator that acts differently depending on an argument's value."""
SPEC_SCHEMA = {
"type": "object",
"properties": {
"argument_name": {
"type": "string",
"minLength": 1
},
"cases": {
"type": "object",
"minProperties": 1,
"additionalProperties":
SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA,
},
},
"additionalProperties": False,
"required": ["argument_name", "cases"]
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds an argument_case validator from a specification.
:param spec: A dictionary with two items: "argument_name", containing
a string indicating the argument to be checked, and "cases", a
dictionary. The key of each item in the dictionary is a value
which may or may not match the argument value, and the value is
a list of validators to be run in case it does.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A SaharaArgumentCaseValidator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
argument_name = spec['argument_name']
cases = {key: SaharaAllValidator.from_spec(
value, validator_map, resource_roots)
for key, value in six.iteritems(spec['cases'])}
return cls(argument_name, cases)
def __init__(self, argument_name, cases):
"""Constructor method.
:param argument_name: The name of an argument.
:param cases: A dictionary of possible argument value to a
sub-validator to run in case of a match.
"""
self.argument_name = argument_name
self.cases = cases
def validate(self, remote, reconcile=True,
image_arguments=None, **kwargs):
"""Attempts to validate depending on argument value.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
:raises ImageValidationError: If validation fails.
"""
arg = self.argument_name
if arg not in image_arguments:
raise p_ex.ImageValidationError(
_("Argument {name} not found.").format(name=arg))
value = image_arguments[arg]
if value in self.cases:
self.cases[value].validate(
remote, reconcile=reconcile,
image_arguments=image_arguments)
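# Illustrative argument_case spec (argument, package and script names are
# hypothetical); the validators under the matching case run as an implicit
# "all" block:
#   argument_name: java_distro
#   cases:
#     openjdk:
#       - package: openjdk-7-jre
#     oracle-java:
#       - script: common/oracle_java.sh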
class SaharaArgumentSetterValidator(SaharaImageValidatorBase):
"""A validator which sets a specific argument to a specific value."""
SPEC_SCHEMA = {
"type": "object",
"properties": {
"argument_name": {
"type": "string",
"minLength": 1
},
"value": {
"type": "string",
"minLength": 1
},
},
"additionalProperties": False,
"required": ["argument_name", "value"]
}
@classmethod
def from_spec(cls, spec, validator_map, resource_roots):
"""Builds an argument_set validator from a specification.
:param spec: A dictionary with two items: "argument_name", containing
a string indicating the argument to be set, and "value", a value
to which to set that argument.
:param validator_map: A map of validator name to class.
:param resource_roots: The roots from which relative paths to
resources (scripts and such) will be referenced. Any resource will
be pulled from the first path in the list at which a file exists.
:return: A SaharaArgumentSetterValidator.
"""
jsonschema.validate(spec, cls.SPEC_SCHEMA)
argument_name = spec['argument_name']
value = spec['value']
return cls(argument_name, value)
def __init__(self, argument_name, value):
"""Constructor method.
:param argument_name: The name of an argument.
:param value: A value to which to set that argument.
"""
self.argument_name = argument_name
self.value = value
    def validate(self, remote, reconcile=True,
                 image_arguments=None, **kwargs):
        """Sets the specified image argument to the specified value.
:param remote: A remote socket to the instance.
:param reconcile: If false, all validators will only verify that a
desired state is present, and fail if it is not. If true, all
validators will attempt to enforce the desired state if possible,
and succeed if this enforcement succeeds.
:param image_arguments: A dictionary of image argument values keyed by
argument name.
"""
image_arguments[self.argument_name] = self.value
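# Illustrative argument_set spec (names hypothetical); later validators in the
# same run will see java_version set to "1.7.0" in image_arguments:
#   argument_name: java_version
#   value: "1.7.0"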
def _sudo(remote, cmd, **kwargs):
return remote.execute_command(cmd, run_as_root=True, **kwargs)
|