# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import tempfile
from abc import abstractmethod, abstractproperty
from contextlib import contextmanager
from six import binary_type, string_types
from twitter.common.collections import maybe_list
from twitter.common.lang import AbstractClass
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.backend.jvm.targets.jvm_binary import Duplicate, JarRules, Skip
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.java.jar.manifest import Manifest
from pants.util.contextutil import temporary_dir
class Jar(object):
"""Encapsulates operations to build up or update a jar file.
  Upon construction the jar is conceptually opened for writes. The write methods stage
  additions to the jar's contents, and the staged changes are only applied when the jar is
  closed; with ``JarTask.open_jar`` this happens when the context manager exits. If the jar
  is never closed the staged changes will be lost.
"""
class Error(Exception):
"""Indicates an error creating or updating a jar on disk."""
class Entry(AbstractClass):
"""An entry to be written to a jar."""
def __init__(self, dest):
self._dest = dest
@property
def dest(self):
"""The destination path of the entry in the jar."""
return self._dest
@abstractmethod
def materialize(self, scratch_dir):
"""Materialize this entry's source data into a filesystem path.
:param string scratch_dir: A temporary directory that may be used to do any work required
to materialize the entry as a source file. The caller is responsible for cleaning up
`scratch_dir` after the jar is closed.
:returns: The path to the source data.
"""
class FileSystemEntry(Entry):
"""An entry backed by an existing file on disk."""
def __init__(self, src, dest=None):
super(Jar.FileSystemEntry, self).__init__(dest)
self._src = src
def materialize(self, _):
return self._src
class MemoryEntry(Entry):
"""An entry backed by an in-memory sequence of bytes."""
def __init__(self, dest, contents):
super(Jar.MemoryEntry, self).__init__(dest)
self._contents = contents
def materialize(self, scratch_dir):
fd, path = tempfile.mkstemp(dir=scratch_dir)
try:
os.write(fd, self._contents)
finally:
os.close(fd)
return path
def __init__(self):
self._entries = []
self._jars = []
self._manifest = None
self._main = None
self._classpath = None
def main(self, main):
"""Specifies a Main-Class entry for this jar's manifest.
:param string main: a fully qualified class name
"""
if not main or not isinstance(main, string_types):
raise ValueError('The main entry must be a non-empty string')
self._main = main
def classpath(self, classpath):
"""Specifies a Class-Path entry for this jar's manifest.
:param list classpath: a list of paths
"""
self._classpath = maybe_list(classpath)
def write(self, src, dest=None):
"""Schedules a write of the file at ``src`` to the ``dest`` path in this jar.
If the ``src`` is a file, then ``dest`` must be specified.
If the ``src`` is a directory then by default all descendant files will be added to the jar as
entries carrying their relative path. If ``dest`` is specified it will be prefixed to each
descendant's relative path to form its jar entry path.
:param string src: the path to the pre-existing source file or directory
:param string dest: the path the source file or directory should have in this jar
"""
if not src or not isinstance(src, string_types):
raise ValueError('The src path must be a non-empty string, got %s of type %s.'
% (src, type(src)))
if dest and not isinstance(dest, string_types):
raise ValueError('The dest entry path must be a non-empty string, got %s of type %s.'
% (dest, type(dest)))
if not os.path.isdir(src) and not dest:
raise self.Error('Source file %s must have a jar destination specified' % src)
self._add_entry(self.FileSystemEntry(src, dest))
def writestr(self, path, contents):
"""Schedules a write of the file ``contents`` to the given ``path`` in this jar.
:param string path: the path to write the contents to in this jar
:param string contents: the raw byte contents of the file to write to ``path``
"""
if not path or not isinstance(path, string_types):
raise ValueError('The path must be a non-empty string')
if contents is None or not isinstance(contents, binary_type):
raise ValueError('The contents must be a sequence of bytes')
self._add_entry(self.MemoryEntry(path, contents))
def _add_entry(self, entry):
if Manifest.PATH == entry.dest:
self._manifest = entry
else:
self._entries.append(entry)
def writejar(self, jar):
"""Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest.
:param string jar: the path to the pre-existing jar to graft into this jar
"""
if not jar or not isinstance(jar, string_types):
raise ValueError('The jar path must be a non-empty string')
self._jars.append(jar)
@contextmanager
def _render_jar_tool_args(self):
args = []
if self._main:
args.append('-main=%s' % self._main)
if self._classpath:
args.append('-classpath=%s' % ','.join(self._classpath))
with temporary_dir() as stage_dir:
if self._manifest:
args.append('-manifest=%s' % self._manifest.materialize(stage_dir))
if self._entries:
def as_cli_entry(entry):
src = entry.materialize(stage_dir)
return '%s=%s' % (src, entry.dest) if entry.dest else src
args.append('-files=%s' % ','.join(map(as_cli_entry, self._entries)))
if self._jars:
args.append('-jars=%s' % ','.join(self._jars))
yield args
class JarTask(NailgunTask):
"""A baseclass for tasks that need to create or update jars.
All subclasses will share the same underlying nailgunned jar tool and thus benefit from fast
invocations.
"""
_CONFIG_SECTION = 'jar-tool'
@staticmethod
def _flag(bool_value):
return 'true' if bool_value else 'false'
_DUPLICATE_ACTION_TO_NAME = {
Duplicate.SKIP: 'SKIP',
Duplicate.REPLACE: 'REPLACE',
Duplicate.CONCAT: 'CONCAT',
Duplicate.FAIL: 'THROW',
}
@classmethod
def _action_name(cls, action):
name = cls._DUPLICATE_ACTION_TO_NAME.get(action)
if name is None:
raise ValueError('Unrecognized duplicate action: %s' % action)
return name
@classmethod
def register_options(cls, register):
super(JarTask, cls).register_options(register)
cls.register_jvm_tool(register, 'jar-tool')
@classmethod
def prepare(cls, options, round_manager):
super(JarTask, cls).prepare(options, round_manager)
round_manager.require_data('resources_by_target')
round_manager.require_data('classes_by_target')
def __init__(self, *args, **kwargs):
super(JarTask, self).__init__(*args, **kwargs)
self.set_distribution(jdk=True)
# TODO(John Sirois): Consider poking a hole for custom jar-tool jvm args - namely for Xmx
# control.
@property
def config_section(self):
return self._CONFIG_SECTION
@contextmanager
def open_jar(self, path, overwrite=False, compressed=True, jar_rules=None):
"""Yields a Jar that will be written when the context exits.
:param string path: the path to the jar file
:param bool overwrite: overwrite the file at ``path`` if it exists; ``False`` by default; ie:
update the pre-existing jar at ``path``
:param bool compressed: entries added to the jar should be compressed; ``True`` by default
:param jar_rules: an optional set of rules for handling jar exclusions and duplicates
"""
jar = Jar()
try:
yield jar
except jar.Error as e:
raise TaskError('Failed to write to jar at %s: %s' % (path, e))
with jar._render_jar_tool_args() as args:
if args: # Don't build an empty jar
args.append('-update=%s' % self._flag(not overwrite))
args.append('-compress=%s' % self._flag(compressed))
jar_rules = jar_rules or JarRules.default()
args.append('-default_action=%s' % self._action_name(jar_rules.default_dup_action))
skip_patterns = []
duplicate_actions = []
for rule in jar_rules.rules:
if isinstance(rule, Skip):
skip_patterns.append(rule.apply_pattern)
elif isinstance(rule, Duplicate):
duplicate_actions.append('%s=%s' % (rule.apply_pattern.pattern,
self._action_name(rule.action)))
else:
raise ValueError('Unrecognized rule: %s' % rule)
if skip_patterns:
args.append('-skip=%s' % ','.join(p.pattern for p in skip_patterns))
if duplicate_actions:
args.append('-policies=%s' % ','.join(duplicate_actions))
args.append(path)
jvm_options = self.context.config.getlist('jar-tool', 'jvm_args', default=['-Xmx64M'])
self.runjava(self.tool_classpath('jar-tool'),
'com.twitter.common.jar.tool.Main',
jvm_options=jvm_options,
args=args,
workunit_name='jar-tool',
workunit_labels=[WorkUnit.TOOL, WorkUnit.JVM, WorkUnit.NAILGUN])
class JarBuilder(AbstractClass):
"""A utility to aid in adding the classes and resources associated with targets to a jar."""
@staticmethod
def _write_agent_manifest(agent, jar):
# TODO(John Sirois): refactor an agent model to support 'Boot-Class-Path' properly.
manifest = Manifest()
manifest.addentry(Manifest.MANIFEST_VERSION, '1.0')
if agent.premain:
manifest.addentry('Premain-Class', agent.premain)
if agent.agent_class:
manifest.addentry('Agent-Class', agent.agent_class)
if agent.can_redefine:
manifest.addentry('Can-Redefine-Classes', 'true')
if agent.can_retransform:
manifest.addentry('Can-Retransform-Classes', 'true')
if agent.can_set_native_method_prefix:
manifest.addentry('Can-Set-Native-Method-Prefix', 'true')
jar.writestr(Manifest.PATH, manifest.contents())
@abstractproperty
def _context(self):
"""Implementations must supply a context."""
def add_target(self, jar, target, recursive=False):
"""Adds the classes and resources for a target to an open jar.
:param jar: An open jar to add to.
:param target: The target to add generated classes and resources for.
:param bool recursive: `True` to add classes and resources for the target's transitive
internal dependency closure.
:returns: The list of targets that actually contributed classes or resources or both to the
jar.
"""
classes_by_target = self._context.products.get_data('classes_by_target')
resources_by_target = self._context.products.get_data('resources_by_target')
targets_added = []
def add_to_jar(tgt):
target_classes = classes_by_target.get(tgt)
target_resources = []
# TODO(pl): https://github.com/pantsbuild/pants/issues/206
resource_products_on_target = resources_by_target.get(tgt)
if resource_products_on_target:
target_resources.append(resource_products_on_target)
if tgt.has_resources:
target_resources.extend(resources_by_target.get(r) for r in tgt.resources)
if target_classes or target_resources:
targets_added.append(tgt)
def add_products(target_products):
if target_products:
for root, products in target_products.rel_paths():
for prod in products:
jar.write(os.path.join(root, prod), prod)
add_products(target_classes)
for resources_target in target_resources:
add_products(resources_target)
if isinstance(tgt, JavaAgent):
self._write_agent_manifest(tgt, jar)
if recursive:
target.walk(add_to_jar)
else:
add_to_jar(target)
return targets_added
def prepare_jar_builder(self):
"""Prepares a ``JarTask.JarBuilder`` for use during ``execute``.
This method should be called during task preparation to ensure the classes and resources needed
for jarring targets are mapped by upstream tasks that generate these.
"""
class PreparedJarBuilder(self.JarBuilder):
@property
def _context(me):
return self.context
return PreparedJarBuilder()
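

# Hedged usage sketch, not part of the Pants sources: how a JarTask subclass might
# assemble a jar via open_jar(). The task name, output filename, manifest entry and
# the use of self.workdir are illustrative assumptions only.
class ExampleJarCreate(JarTask):
  """Illustrative only: stages a Main-Class entry and one in-memory file, then lets
  open_jar invoke the nailgunned jar tool when the context exits.
  """

  def execute(self):
    jar_path = os.path.join(self.workdir, 'example.jar')
    with self.open_jar(jar_path, overwrite=True, compressed=True) as jar:
      jar.main('org.example.ExampleMain')
      jar.writestr('org/example/build-info.txt', b'generated by ExampleJarCreate\n')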
import time


class WatchDogException(Exception):
"""Exception raised for errors in the Watch Dog.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, expr, msg):
self.expr = expr
self.msg = msg
class WatchDogControl():
def __init__(self, io_server , alarm_queue, remote_devices,plc_click ):
self.alarm_queue = alarm_queue
self.remote_devices = remote_devices
self.io_server = io_server
self.plc_click = plc_click
def modbus_check_mode_switches( self,*arg ):
return_value = True
for j in self.remote_devices.keys():
i = self.remote_devices[j]
if i["type"] == "CLICK" :
plc = self.plc_click[i["type"]]
if plc.check_mode_switch( self.io_server, i ) == False:
return False
return True
def modbus_read_wd_flag( self,*arg ): #variable arguments put in so that function can be called by chain flow
for j in self.remote_devices.keys():
i = self.remote_devices[j]
if i["type"] == "CLICK":
plc = self.plc_click[i["type"]]
plc.read_wd_flag( self.io_server, i )
return "CONTINUE"
def modbus_write_wd_flag( self,*arg ): #variable arguments put in so that function can be called by chain flow
for j in self.remote_devices.keys():
i = self.remote_devices[j]
if i["type"] == "CLICK":
plc = self.plc_click[i["type"]]
plc.write_wd_flag( self.io_server, i )
return "CONTINUE"
class IO_mgr():
def __init__(self,redis_server, io_server, plc_interface, remote_devices, gpio_input_devices, analog_devices, counter_devices ):
self.redis = redis_server
self.io_server = io_server
self.gpio_input_devices = gpio_input_devices
self.analog_devices = analog_devices
self.counter_devices = counter_devices
self.plc_interface = plc_interface
self.remote_devices = remote_devices
self.counter_time_ref = time.time()
def clear_gpio_in( self,*arg):
for redis_key, device in self.gpio_input_devices.items():
self.redis.hset("GPIO_IN",redis_key,0)
return "CONTINUE"
def measure_gpio_in( self ,*arg):
for redis_key, device in self.gpio_input_devices.items():
if device["type"] == "CLICK":
remote = device["remote"]
address = self.remote_devices[remote]["address"]
plc = self.plc_interface["CLICK"]
plc.measure_gpio_in( self.redis, self.io_server, address, device, redis_key )
return "CONTINUE"
def measure_gpio_in_pin( self, redis_key ):
device = self.gpio_input_devices[redis_key]
if device["type"] == "CLICK":
remote = device["remote"]
address = self.remote_devices[remote]["address"]
plc = self.plc_interface[ device["type"] ]
return plc.measure_gpio_in( self.redis, self.io_server, address, device, redis_key )
else:
return None
def measure_analog( self,*arg ):
for redis_key, device in self.analog_devices.items():
if device["type"] == "CLICK":
remote = device["remote"]
address = self.remote_devices[remote]["address"]
plc = self.plc_interface["CLICK"]
plc.measure_analog( self.redis, self.io_server, address, device,redis_key )
return "CONTINUE"
def measure_analog_pin( self, redis_key ):
device = self.analog_devices[redis_key]
if device["type"] == "CLICK":
remote = device["remote"]
address = self.remote_devices[remote]["address"]
plc = self.plc_interface[ device["type"] ]
return plc.measure_analog( self.redis, self.io_server, address, device, redis_key )
def measure_counters( self,*arg ):
deltat = time.time() - self.counter_time_ref
self.counter_time_ref = time.time()
for redis_key,device in self.counter_devices.items():
if device["type"] == "CLICK":
plc_interface = self.plc_interface["CLICK"]
remote = device["remote"]
address = self.remote_devices[ remote]["address"]
plc_interface.measure_counters( self.redis,self.io_server, address, device, redis_key, deltat )
class IrrigationIo():
def __init__(self, io_server, alarm_queue, remote_devices, master_valve_list,remote_io, plc_map ):
self.io_server = io_server
self.alarm_queue = alarm_queue
self.remote_devices = remote_devices
self.master_valve_list = master_valve_list
self.plc_map = plc_map
self.remote_io = remote_io
    def disable_all_sprinklers( self,*arg ):
        for j in self.remote_devices.keys():
            i = self.remote_devices[j]
            if i["type"] == "CLICK" :
                self.plc_map[i["type"]].disable_all_sprinklers( i, self.io_server )
def turn_on_master_valves( self,*arg ):
for master_valve in self.master_valve_list:
if master_valve["type"] == "CLICK":
                address = self.remote_devices[ master_valve["remote"] ]["address"]
self.plc_map[master_valve["type"]].turn_on_master_valve( self.io_server, address, master_valve )
def turn_off_master_valves( self,*arg ):
for master_valve in self.master_valve_list:
if master_valve["type"] == "CLICK":
                address = self.remote_devices[ master_valve["remote"] ]["address"]
self.plc_map[master_valve["type"]].turn_off_master_valve( self.io_server, address, master_valve )
def turn_on_cleaning_valves( self,*arg ):
for master_valve in self.master_valve_list:
if master_valve["type"] == "CLICK":
                address = self.remote_devices[ master_valve["remote"] ]["address"]
self.plc_map[master_valve["type"]].turn_on_cleaning_valve( self.io_server, address, master_valve )
def turn_off_cleaning_valves( self,*arg ):
for master_valve in self.master_valve_list:
if master_valve["type"] == "CLICK":
                address = self.remote_devices[ master_valve["remote"] ]["address"]
self.plc_map[master_valve["type"]].turn_off_cleaning_valve( self.io_server, address, master_valve )
#
# Clearing Duration counter is done through a falling edge
# going from 1 to 0 generates the edge
def clear_duration_counters( self,*arg ):
for j in self.remote_io.keys():
i = self.remote_io[j]
if i["type"] == "CLICK":
address = self.remote_devices[j]["address"]
                self.plc_map[i["type"]].clear_duration_counters( i, address, self.io_server )
def load_duration_counters( self, time_duration ,*arg):
duration = (time_duration*60)+15 # convert minutes to seconds
for j in self.remote_io.keys():
i = self.remote_io[j]
if i["type"] == "CLICK":
address = self.remote_devices[j]["address"]
                self.plc_map[i["type"]].load_duration_counters( i, address, self.io_server, duration )
    def turn_off_io( self , io_setup ):
        # io_setup is a list of dicts: [{ "remote": xx, "bits": [1, 2, 3, 4] }, ...]
        self.turn_off_master_valves()
for i in io_setup:
remote = i["remote"]
remote_dev = self.remote_devices[remote]
address = remote_dev["address"]
bits = i["bits"] # list of outputs on remote to turn off
if remote_dev["type"] == "CLICK":
                self.plc_map[remote_dev["type"]].turn_off_io( address, bits )  # assumption: off-counterpart of turn_on_io
    def turn_on_valve( self ,io_setup ):
        for i in io_setup:
            remote = i["remote"]
            remote_dev = self.remote_devices[remote]
            address = remote_dev["address"]
            bits = i["bits"] # list of outputs on remote to turn on
            if remote_dev["type"] == "CLICK":
                self.plc_map[remote_dev["type"]].turn_on_io( address, bits )
def turn_on_io( self ,io_setup ):
        self.turn_on_master_valves()
for i in io_setup:
remote = i["remote"]
remote_dev = self.remote_devices[remote]
address = remote_dev["address"]
bits = i["bits"] # list of outputs on remote to turn off
if remote_dev["type"] == "CLICK":
self.plc_map[remote_dev["type"]].turn_on_io( address, bits )
if __name__ == "__main__":
import time
import redis
import modbus_UDP
import json
import base64
import os
import click
from remote_devices import *
plc_click = click.PLC_Click()
redis = redis.StrictRedis( host = "192.168.1.82", port=6379, db = 0 )
modbus_udp = modbus_UDP.ModbusUDPClient("192.168.1.84")
alarm_queue = AlarmQueue( redis )
watch_dog_control = WatchDogControl( modbus_udp, alarm_queue, remote_devices, {"CLICK":plc_click} )
irrigation_control = IrrigationIo( modbus_udp, alarm_queue, remote_devices,master_valve_list,irrigation_io, {"CLICK":plc_click} )
io_mgr = IO_mgr( redis, modbus_udp, {"CLICK":plc_click}, remote_devices, gpio_input_devices, analog_devices, counter_devices )
if watch_dog_control.modbus_check_mode_switches() == True:
print "plc with mode set properly"
else:
print "plc with mode not properly set"
print "watch dog read", watch_dog_control.modbus_read_wd_flag()
print "watch dog write",watch_dog_control.modbus_write_wd_flag()
print "watch dog read", watch_dog_control.modbus_read_wd_flag()
#print modbus_udp.read_bit(100,23)
#modbus_udp.write_bit(100,23,1)
#print modbus_udp.read_bit(100,23)
#modbus_udp.write_bit(100,23,0)
#print modbus_udp.read_bit(100,23)
#print "disable_all_sprinkers", irrigation_control.disable_all_sprinklers()
#print modbus_udp.read_bit( 100, plc_click.click_bit_address["C1"])
#print "turn on master valve"
irrigation_control.turn_on_master_valves()
irrigation_control.turn_on_cleaning_valves()
time.sleep(2)
io_mgr.measure_analog()
#irrigation_control.turn_off_master_valves()
time.sleep(2)
# irrigation_control.disable_all_sprinklers()
# irrigation_control.clear_duration_counters()
# irrigation_control.load_duration_counters( 15 )
io_mgr.clear_gpio_in()
print io_mgr.measure_gpio_in_pin( "master_valve_set_switch" )
io_mgr.measure_gpio_in()
irrigation_control.turn_off_master_valves()
irrigation_control.turn_off_cleaning_valves()
time.sleep(2)
print io_mgr.measure_analog_pin("coil_current" )
#irrigation_control.turn_on_master_valves()
#time.sleep(2)
#io_mgr.measure_analog()
#irrigation_control.turn_off_master_valves()
#time.sleep(2)
# time.sleep(2)# io_mgr.measure_analog()
io_mgr.measure_counters()
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from __future__ import print_function
import numpy as np
import sys
import os
import inspect
import gc
import pytest
import functools
from distutils.version import LooseVersion
from ..util.check_environment import has_backend
skipif = pytest.mark.skipif
def SkipTest(*args, **kwargs):
"""Backport for raising SkipTest that gives a better traceback."""
__tracebackhide__ = True
import pytest
return pytest.skip(*args, **kwargs)
def _safe_rep(obj, short=False):
"""Helper for assert_* ports"""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < 80:
return result
return result[:80] + ' [truncated]...'
def _safe_str(obj):
"""Helper for assert_* ports"""
try:
return str(obj)
except Exception:
return object.__str__(obj)
def _format_msg(msg, std_msg):
"""Helper for assert_* ports"""
if msg is None:
msg = std_msg
else:
try:
msg = '%s : %s' % (std_msg, msg)
except UnicodeDecodeError:
msg = '%s : %s' % (_safe_str(std_msg), _safe_str(msg))
return msg
def nottest(func):
"""Decorator to mark a function or method as *not* a test
"""
func.__test__ = False
return func
def assert_raises(exp, func, *args, **kwargs):
"""Backport"""
try:
func(*args, **kwargs)
except exp:
return
std_msg = '%s not raised' % (_safe_rep(exp))
raise AssertionError(_format_msg(None, std_msg))
def assert_in(member, container, msg=None):
"""Backport"""
if member in container:
return
std_msg = '%s not found in %s' % (_safe_rep(member), _safe_rep(container))
raise AssertionError(_format_msg(msg, std_msg))
def assert_true(x, msg=None):
"""Backport"""
if x:
return
std_msg = '%s is not True' % (_safe_rep(x),)
raise AssertionError(_format_msg(msg, std_msg))
def assert_equal(x, y, msg=None):
"""Backport"""
if x == y:
return
std_msg = '%s not equal to %s' % (_safe_rep(x), _safe_rep(y))
raise AssertionError(_format_msg(msg, std_msg))
def assert_not_equal(x, y, msg=None):
"""Backport"""
if x != y:
return
std_msg = '%s equal to %s' % (_safe_rep(x), _safe_rep(y))
raise AssertionError(_format_msg(msg, std_msg))
def assert_not_in(member, container, msg=None):
"""Backport"""
if member not in container:
return
std_msg = '%s found in %s' % (_safe_rep(member), _safe_rep(container))
raise AssertionError(_format_msg(msg, std_msg))
def assert_is(expr1, expr2, msg=None):
"""Backport"""
if expr1 is not expr2:
std_msg = '%s is not %s' % (_safe_rep(expr1), _safe_rep(expr2))
raise AssertionError(_format_msg(msg, std_msg))
class raises(object):
"""Helper class to test exception raising"""
def __init__(self, exc):
self.exc = exc
def __enter__(self):
return self
def __exit__(self, exc_typ, exc, tb):
if isinstance(exc, self.exc):
return True
elif exc is None:
raise AssertionError("Expected %s (no exception raised)" %
self.exc.__name__)
else:
raise AssertionError("Expected %s, got %s instead (%s)" %
(self.exc.__name__, type(exc).__name__, exc))
###############################################################################
# GL stuff
def has_pyopengl():
try:
from OpenGL import GL # noqa, analysis:ignore
except ImportError:
return False
else:
return True
def requires_pyopengl():
skip = not has_pyopengl()
return skipif(skip, reason='Requires PyOpenGL')
def requires_ssl():
bad = os.getenv('CIBW_BUILDING', 'false') == 'true'
return skipif(bad, reason='Requires proper SSL support')
###############################################################################
# App stuff
def has_application(backend=None, has=(), capable=()):
"""Determine if a suitable app backend exists"""
from ..app.backends import BACKEND_NAMES
# avoid importing other backends if we don't need to
if backend is None:
for backend in BACKEND_NAMES:
if has_backend(backend, has=has, capable=capable):
good = True
msg = backend
break
else:
good = False
msg = 'Requires application backend'
else:
good, why = has_backend(backend, has=has, capable=capable,
out=['why_not'])
if not good:
msg = 'Requires %s: %s' % (backend, why)
else:
msg = backend
return good, msg
def composed(*decs):
def deco(f):
for dec in reversed(decs):
f = dec(f)
return f
return deco
def garbage_collect(f):
# Pytest expects things like the name of the functions not to change
# Therefore, we must use the functools.wraps decorator on our deco
@functools.wraps(f)
def deco(*args, **kwargs):
gc.collect()
try:
return f(*args, **kwargs)
finally:
gc.collect()
return deco
def requires_application(backend=None, has=(), capable=(), force_gc=True):
"""Return a decorator for tests that require an application"""
good, msg = has_application(backend, has, capable)
dec_backend = skipif(not good, reason="Skipping test: %s" % msg)
try:
import pytest
except Exception:
return dec_backend
dec_app = pytest.mark.vispy_app_test
funcs = [dec_app, dec_backend]
if force_gc:
funcs.append(garbage_collect)
return composed(*funcs)
def requires_img_lib():
"""Decorator for tests that require an image library"""
from ..io import _check_img_lib
if sys.platform.startswith('win'):
has_img_lib = False # PIL breaks tests on windows (!)
else:
has_img_lib = not all(c is None for c in _check_img_lib())
return skipif(not has_img_lib, reason='imageio or PIL required')
def has_ipython(version='3.0'):
"""function that checks the presence of IPython"""
# typecast version to a string, in case an integer is given
version = str(version)
try:
import IPython # noqa
except Exception:
return False, "IPython library not found"
else:
if LooseVersion(IPython.__version__) >= LooseVersion(version):
return True, "IPython present"
else:
message = (
"current IPython version: (%s) is "
"older than expected version: (%s)") % \
(IPython.__version__, version)
return False, message
def requires_ipython(version='3.0'):
ipython_present, message = has_ipython(version)
return skipif(not ipython_present, reason=message)
def requires_numpydoc():
try:
import numpydoc # noqa
except Exception:
present = False
else:
present = True
return skipif(not present, reason='numpydoc is required')
###############################################################################
# Visuals stuff
def _has_scipy(min_version):
try:
assert isinstance(min_version, str)
import scipy # noqa, analysis:ignore
from distutils.version import LooseVersion
this_version = LooseVersion(scipy.__version__)
if this_version < min_version:
return False
except Exception:
return False
else:
return True
def requires_scipy(min_version='0.13'):
return skipif(not _has_scipy(min_version),
reason='Requires Scipy version >= %s' % min_version)
def _bad_glfw_decorate(app):
return app.backend_name == 'Glfw' and \
app.backend_module.glfw.__version__ == (3, 3, 1)
@nottest
def TestingCanvas(bgcolor='black', size=(100, 100), dpi=None, decorate=None,
**kwargs):
"""Avoid importing scene until necessary."""
# On Windows decorations can force windows to be an incorrect size
# (e.g., instead of 100x100 they will be 100x248), having no
# decorations works around this
from ..scene import SceneCanvas
class TestingCanvas(SceneCanvas):
def __init__(self, bgcolor, size, dpi, decorate, **kwargs):
self._entered = False
self._wanted_vp = None
if decorate is None:
# deal with GLFW's problems
from vispy.app import use_app
app = use_app()
if _bad_glfw_decorate(app):
decorate = True
else:
decorate = False
SceneCanvas.__init__(self, bgcolor=bgcolor, size=size,
dpi=dpi, decorate=decorate,
**kwargs)
def __enter__(self):
SceneCanvas.__enter__(self)
            # sometimes our window can be larger than our requested draw
# area (e.g. on Windows), and this messes up our tests that
# typically use very small windows. Here we "fix" it.
scale = np.array(self.physical_size) / np.array(self.size, float)
scale = int(np.round(np.mean(scale)))
self._wanted_vp = 0, 0, size[0] * scale, size[1] * scale
self.context.set_state(clear_color=self._bgcolor)
self.context.set_viewport(*self._wanted_vp)
self._entered = True
return self
def draw_visual(self, visual, event=None):
if not self._entered:
return
SceneCanvas.draw_visual(self, visual, event)
self.context.finish()
return TestingCanvas(bgcolor, size, dpi, decorate, **kwargs)
@nottest
def save_testing_image(image, location):
from ..gloo.util import _screenshot
from ..util import make_png
if image == "screenshot":
image = _screenshot(alpha=False)
with open(location + '.png', 'wb') as fid:
fid.write(make_png(image))
@nottest
def run_tests_if_main():
"""Run tests in a given file if it is run as a script"""
local_vars = inspect.currentframe().f_back.f_locals
if not local_vars.get('__name__', '') == '__main__':
return
# we are in a "__main__"
fname = local_vars['__file__']
# Run ourselves. post-mortem debugging!
try:
import faulthandler
faulthandler.enable()
except Exception:
pass
import __main__
try:
import pytest
pytest.main(['-s', '--tb=short', fname])
except ImportError:
print('==== Running tests in script\n==== %s' % fname)
run_tests_in_object(__main__)
print('==== Tests pass')
def run_tests_in_object(ob):
# Setup
for name in dir(ob):
if name.lower().startswith('setup'):
print('Calling %s' % name)
getattr(ob, name)()
# Exec
for name in sorted(dir(ob), key=lambda x: x.lower()): # consistent order
val = getattr(ob, name)
if name.startswith('_'):
continue
elif callable(val) and (name[:4] == 'test' or name[-4:] == 'test'):
print('Running test-func %s ... ' % name, end='')
try:
val()
print('ok')
except Exception as err:
if 'skiptest' in err.__class__.__name__.lower():
print('skip')
else:
raise
elif isinstance(val, type) and 'Test' in name:
print('== Running test-class %s' % name)
run_tests_in_object(val())
print('== Done with test-class %s' % name)
# Teardown
for name in dir(ob):
if name.lower().startswith('teardown'):
print('Calling %s' % name)
getattr(ob, name)()
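

# Hedged usage sketch, not part of vispy itself: how a test module typically combines
# these helpers. The canvas size and the assertion are illustrative assumptions; kept
# as a comment so that nothing runs when this module is imported.
#
#     from vispy.testing import (TestingCanvas, requires_application,
#                                run_tests_if_main, assert_equal)
#
#     @requires_application()
#     def test_canvas_size():
#         # Decorated tests are skipped when no usable app backend is found.
#         with TestingCanvas(size=(50, 50)) as c:
#             assert_equal(c.size, (50, 50))
#
#     run_tests_if_main()  # allows `python test_foo.py` as well as pytest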
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from django import shortcuts
from horizon import views
from horizon.templatetags.horizon import has_permissions
class MultiTableMixin(object):
"""A generic mixin which provides methods for handling DataTables."""
data_method_pattern = "get_%s_data"
def __init__(self, *args, **kwargs):
super(MultiTableMixin, self).__init__(*args, **kwargs)
self.table_classes = getattr(self, "table_classes", [])
self._data = {}
self._tables = {}
self._data_methods = defaultdict(list)
self.get_data_methods(self.table_classes, self._data_methods)
def _get_data_dict(self):
if not self._data:
for table in self.table_classes:
data = []
name = table._meta.name
func_list = self._data_methods.get(name, [])
for func in func_list:
data.extend(func())
self._data[name] = data
return self._data
def get_data_methods(self, table_classes, methods):
for table in table_classes:
name = table._meta.name
if table._meta.mixed_data_type:
for data_type in table._meta.data_types:
func = self.check_method_exist(self.data_method_pattern,
data_type)
if func:
type_name = table._meta.data_type_name
methods[name].append(self.wrap_func(func,
type_name,
data_type))
else:
func = self.check_method_exist(self.data_method_pattern,
name)
if func:
methods[name].append(func)
def wrap_func(self, data_func, type_name, data_type):
def final_data():
data = data_func()
self.assign_type_string(data, type_name, data_type)
return data
return final_data
def check_method_exist(self, func_pattern="%s", *names):
func_name = func_pattern % names
func = getattr(self, func_name, None)
if not func or not callable(func):
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"in %s." % (func_name, cls_name))
else:
return func
def assign_type_string(self, data, type_name, data_type):
for datum in data:
setattr(datum, type_name, data_type)
def get_tables(self):
if not self.table_classes:
raise AttributeError('You must specify one or more DataTable '
'classes for the "table_classes" attribute '
'on %s.' % self.__class__.__name__)
if not self._tables:
for table in self.table_classes:
if not has_permissions(self.request.user,
table._meta):
continue
func_name = "get_%s_table" % table._meta.name
table_func = getattr(self, func_name, None)
if table_func is None:
tbl = table(self.request, **self.kwargs)
else:
tbl = table_func(self, self.request, **self.kwargs)
self._tables[table._meta.name] = tbl
return self._tables
def get_context_data(self, **kwargs):
context = super(MultiTableMixin, self).get_context_data(**kwargs)
tables = self.get_tables()
for name, table in tables.items():
context["%s_table" % name] = table
return context
def has_prev_data(self, table):
return False
def has_more_data(self, table):
return False
def needs_filter_first(self, table):
return False
def handle_table(self, table):
name = table.name
data = self._get_data_dict()
self._tables[name].data = data[table._meta.name]
self._tables[name].needs_filter_first = \
self.needs_filter_first(table)
self._tables[name]._meta.has_more_data = self.has_more_data(table)
self._tables[name]._meta.has_prev_data = self.has_prev_data(table)
handled = self._tables[name].maybe_handle()
return handled
def get_server_filter_info(self, request, table=None):
if not table:
table = self.get_table()
filter_action = table._meta._filter_action
if filter_action is None or filter_action.filter_type != 'server':
return None
param_name = filter_action.get_param_name()
filter_string = request.POST.get(param_name)
filter_string_session = request.session.get(param_name, "")
changed = (filter_string is not None
and filter_string != filter_string_session)
if filter_string is None:
filter_string = filter_string_session
filter_field_param = param_name + '_field'
filter_field = request.POST.get(filter_field_param)
filter_field_session = request.session.get(filter_field_param)
if filter_field is None and filter_field_session is not None:
filter_field = filter_field_session
filter_info = {
'action': filter_action,
'value_param': param_name,
'value': filter_string,
'field_param': filter_field_param,
'field': filter_field,
'changed': changed
}
return filter_info
def handle_server_filter(self, request, table=None):
"""Update the table server filter information in the session.
Returns True if the filter has been changed.
"""
if not table:
table = self.get_table()
filter_info = self.get_server_filter_info(request, table)
if filter_info is None:
return False
request.session[filter_info['value_param']] = filter_info['value']
if filter_info['field_param']:
request.session[filter_info['field_param']] = filter_info['field']
return filter_info['changed']
def update_server_filter_action(self, request, table=None):
"""Update the table server side filter action.
It is done based on the current filter. The filter info may be stored
in the session and this will restore it.
"""
if not table:
table = self.get_table()
filter_info = self.get_server_filter_info(request, table)
if filter_info is not None:
action = filter_info['action']
setattr(action, 'filter_string', filter_info['value'])
if filter_info['field_param']:
setattr(action, 'filter_field', filter_info['field'])
class MultiTableView(MultiTableMixin, views.HorizonTemplateView):
"""Generic view to handle multiple DataTable classes in a single view.
Each DataTable class must be a :class:`~horizon.tables.DataTable` class
or its subclass.
Three steps are required to use this view: set the ``table_classes``
attribute with a tuple of the desired
:class:`~horizon.tables.DataTable` classes;
define a ``get_{{ table_name }}_data`` method for each table class
which returns a set of data for that table; and specify a template for
the ``template_name`` attribute.
"""
def construct_tables(self):
tables = self.get_tables().values()
# Early out before data is loaded
for table in tables:
preempted = table.maybe_preempt()
if preempted:
return preempted
# Load data into each table and check for action handlers
for table in tables:
handled = self.handle_table(table)
if handled:
return handled
# If we didn't already return a response, returning None continues
# with the view as normal.
return None
def get(self, request, *args, **kwargs):
handled = self.construct_tables()
if handled:
return handled
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
# GET and POST handling are the same
return self.get(request, *args, **kwargs)
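

# Hedged usage sketch, not part of Horizon: a MultiTableView subclass wiring two
# hypothetical DataTable classes to the data methods the mixin looks up via the
# "get_%s_data" pattern. Table names, the template path, and the data sources are
# illustrative assumptions only.
#
#     class InstancesAndVolumesView(MultiTableView):
#         table_classes = (InstancesTable, VolumesTable)  # names come from each Meta.name
#         template_name = 'project/overview/index.html'
#
#         def get_instances_data(self):
#             return instance_list(self.request)   # any iterable of row objects
#
#         def get_volumes_data(self):
#             return volume_list(self.request)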
class DataTableView(MultiTableView):
"""A class-based generic view to handle basic DataTable processing.
Three steps are required to use this view: set the ``table_class``
attribute with the desired :class:`~horizon.tables.DataTable` class;
define a ``get_data`` method which returns a set of data for the
table; and specify a template for the ``template_name`` attribute.
Optionally, you can override the ``has_more_data`` method to trigger
pagination handling for APIs that support it.
"""
table_class = None
context_object_name = 'table'
template_name = 'horizon/common/_data_table_view.html'
def _get_data_dict(self):
if not self._data:
self.update_server_filter_action(self.request)
self._data = {self.table_class._meta.name: self.get_data()}
return self._data
def get_data(self):
return []
def get_tables(self):
if not self._tables:
self._tables = {}
if has_permissions(self.request.user,
self.table_class._meta):
self._tables[self.table_class._meta.name] = self.get_table()
return self._tables
def get_table(self):
# Note: this method cannot be easily memoized, because get_context_data
# uses its cached value directly.
if not self.table_class:
raise AttributeError('You must specify a DataTable class for the '
'"table_class" attribute on %s.'
% self.__class__.__name__)
if not hasattr(self, "table"):
self.table = self.table_class(self.request, **self.kwargs)
return self.table
def get_context_data(self, **kwargs):
context = super(DataTableView, self).get_context_data(**kwargs)
if hasattr(self, "table"):
context[self.context_object_name] = self.table
return context
def post(self, request, *args, **kwargs):
# If the server side table filter changed then go back to the first
# page of data. Otherwise GET and POST handling are the same.
if self.handle_server_filter(request):
return shortcuts.redirect(self.get_table().get_absolute_url())
return self.get(request, *args, **kwargs)
def get_filters(self, filters=None, filters_map=None):
"""Converts a string given by the user into a valid api filter value.
:filters: Default filter values.
{'filter1': filter_value, 'filter2': filter_value}
:filters_map: mapping between user input and valid api filter values.
{'filter_name':{_("true_value"):True, _("false_value"):False}
"""
filters = filters or {}
filters_map = filters_map or {}
filter_action = self.table._meta._filter_action
if filter_action:
filter_field = self.table.get_filter_field()
if filter_action.is_api_filter(filter_field):
filter_string = self.table.get_filter_string().strip()
if filter_field and filter_string:
filter_map = filters_map.get(filter_field, {})
# We use the filter_string given by the user and
# look for valid values in the filter_map that's why
# we apply lower()
filters[filter_field] = filter_map.get(
filter_string.lower(), filter_string)
return filters
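

# Hedged usage sketch, not part of Horizon: the three steps from the DataTableView
# docstring above. The table class, template path, and data source are hypothetical;
# the filters_map shape mirrors the get_filters() documentation.
#
#     class InstancesView(DataTableView):
#         table_class = InstancesTable
#         template_name = 'project/instances/index.html'
#
#         def get_data(self):
#             filters = self.get_filters(
#                 filters_map={'status': {_("active"): "ACTIVE"}})
#             return instance_list(self.request, search_opts=filters)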
class MixedDataTableView(DataTableView):
"""A class-based generic view to handle DataTable with mixed data types.
Basic usage is the same as DataTableView.
Three steps are required to use this view:
#. Set the ``table_class`` attribute with desired
:class:`~horizon.tables.DataTable` class. In the class the
``data_types`` list should have at least two elements.
#. Define a ``get_{{ data_type }}_data`` method for each data type
which returns a set of data for the table.
#. Specify a template for the ``template_name`` attribute.
"""
table_class = None
context_object_name = 'table'
def _get_data_dict(self):
if not self._data:
table = self.table_class
self._data = {table._meta.name: []}
for data_type in table.data_types:
func_name = "get_%s_data" % data_type
data_func = getattr(self, func_name, None)
if data_func is None:
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"for %s data type in %s." %
(func_name, data_type, cls_name))
data = data_func()
self.assign_type_string(data, data_type)
self._data[table._meta.name].extend(data)
return self._data
def assign_type_string(self, data, type_string):
for datum in data:
setattr(datum, self.table_class.data_type_name,
type_string)
def get_table(self):
self.table = super(MixedDataTableView, self).get_table()
if not self.table._meta.mixed_data_type:
raise AttributeError('You must have at least two elements in '
'the data_types attribute '
'in table %s to use MixedDataTableView.'
% self.table._meta.name)
return self.table
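

# Hedged usage sketch, not part of Horizon: a MixedDataTableView whose table declares
# two data types, each fed by its own get_{{ data_type }}_data method. All names below
# are hypothetical.
#
#     class AlertsTable(tables.DataTable):
#         class Meta(object):
#             name = "alerts"
#             data_types = ("warning", "error")
#
#     class AlertsView(MixedDataTableView):
#         table_class = AlertsTable
#         template_name = 'project/alerts/index.html'
#
#         def get_warning_data(self):
#             return warning_list(self.request)
#
#         def get_error_data(self):
#             return error_list(self.request)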
class PagedTableMixin(object):
def __init__(self, *args, **kwargs):
super(PagedTableMixin, self).__init__(*args, **kwargs)
self._has_prev_data = False
self._has_more_data = False
def has_prev_data(self, table):
return self._has_prev_data
def has_more_data(self, table):
return self._has_more_data
def _get_marker(self):
try:
meta = self.table_class._meta
except AttributeError:
meta = self.table_classes[0]._meta
prev_marker = self.request.GET.get(meta.prev_pagination_param, None)
if prev_marker:
return prev_marker, "asc"
else:
marker = self.request.GET.get(meta.pagination_param, None)
if marker:
return marker, "desc"
return None, "desc"
"""Functions to visualize matrices of data."""
import itertools
import colorsys
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from .axisgrid import Grid
from .palettes import cubehelix_palette
from .utils import despine, axis_ticklabels_overlap
from .external.six.moves import range
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label
"""
if isinstance(index, pd.MultiIndex):
return "-".join(map(str, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels
"""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(str, i)) for i in index.values]
else:
return index.values
class _HeatMapper(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
# Reverse the rows so the plot looks like the matrix
plot_data = plot_data[::-1]
data = data.ix[::-1]
plot_data = np.ma.masked_where(mask, plot_data)
# Get good names for the rows and columns
if isinstance(xticklabels, bool) and xticklabels:
self.xticklabels = _index_to_ticklabels(data.columns)
elif isinstance(xticklabels, bool) and not xticklabels:
self.xticklabels = ['' for _ in range(data.shape[1])]
else:
self.xticklabels = xticklabels
xlabel = _index_to_label(data.columns)
if isinstance(yticklabels, bool) and yticklabels:
self.yticklabels = _index_to_ticklabels(data.index)
elif isinstance(yticklabels, bool) and not yticklabels:
self.yticklabels = ['' for _ in range(data.shape[0])]
else:
self.yticklabels = yticklabels[::-1]
ylabel = _index_to_label(data.index)
# Get good names for the axis labels
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
# Simple heuristics for whether these data should have a divergent map
divergent = ((vmin < 0) and (vmax > 0)) or center is not None
# Now set center to 0 so math below makes sense
if center is None:
center = 0
# A divergent map should be symmetric around the center value
if divergent:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
self.divergent = divergent
# Now add in the centering value and set the limits
vmin += center
vmax += center
self.vmin = vmin
self.vmax = vmax
# Choose default colormaps if not provided
if cmap is None:
if divergent:
self.cmap = "RdBu_r"
else:
self.cmap = cubehelix_palette(light=.95, as_cmap=True)
else:
self.cmap = cmap
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
xpos, ypos = np.meshgrid(ax.get_xticks(), ax.get_yticks())
for x, y, val, color in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors()):
_, l, _ = colorsys.rgb_to_hls(*color[:3])
text_color = ".15" if l > .5 else "w"
val = ("{:" + self.fmt + "}").format(val)
ax.text(x, y, val, color=text_color,
ha="center", va="center", **self.annot_kws)
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Add row and column labels
nx, ny = self.data.T.shape
ax.set(xticks=np.arange(nx) + .5, yticks=np.arange(ny) + .5)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
plt.draw()
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
# Possibly add a colorbar
if self.cbar:
ticker = mpl.ticker.MaxNLocator(6)
cb = ax.figure.colorbar(mesh, cax, ax,
ticks=ticker, **self.cbar_kws)
cb.outline.set_linewidth(0)
def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=False, fmt=".2g", annot_kws=None,
linewidths=.5, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, ax=None, xticklabels=True, yticklabels=True,
mask=None,
**kwargs):
"""Plot rectangular data as a color-encoded matrix.
This function tries to infer a good colormap to use from the data, but
this is not guaranteed to work, so take care to make sure the kind of
colormap (sequential or diverging) and its limits are appropriate.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
one of these values may be ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
will be either a cubehelix map (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool, optional
If True, write the data value in each cell.
fmt : string, optional
String formatting code to use when ``annot`` is True.
annot_kws : dict of key, value mappings, optional
Keyword arguments for ``ax.text`` when ``annot`` is True.
linewidths : float, optional
Width of the lines that divide each cell.
linecolor : color, optional
Color of the lines that divide each cell.
cbar : boolean, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for `fig.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : boolean, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
    xticklabels : list-like or bool, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
xticklabels
yticklabels : list-like or bool, optional
If True, plot the row names of the dataframe. If False, don't plot
the row names. If list-like, plot these alternate labels as the
yticklabels
kwargs : other keyword arguments
All other keyword arguments are passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels, yticklabels,
mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
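

# Hedged usage sketch, not from the seaborn sources: calling heatmap() on random data.
# The data shape, the RandomState seed, and the savefig filename are illustrative
# assumptions; kept as a comment so nothing runs when this module is imported.
#
#     import numpy as np
#     from seaborn import heatmap
#
#     data = np.random.RandomState(0).randn(10, 12)
#     ax = heatmap(data, center=0, annot=False)   # data spans 0, so a diverging map is inferred
#     ax.figure.savefig("heatmap_example.png")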
class _DendrogramPlotter(object):
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
if self.rotate:
self.X = self.dendrogram['dcoord']
self.Y = self.dendrogram['icoord']
else:
self.X = self.dendrogram['icoord']
self.Y = self.dendrogram['dcoord']
def _calculate_linkage_scipy(self):
        if np.product(self.shape) >= 10000:
            warnings.warn('This will be slow... (gentle suggestion: '
                          '"pip install fastcluster")')
pairwise_dists = distance.squareform(
distance.pdist(self.array, metric=self.metric))
linkage = hierarchy.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = fastcluster.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
        This is a separate function, not a property, because we don't want to
        recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_list=['k'], color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
for x, y in zip(self.X, self.Y):
ax.plot(x, y, color='k', linewidth=.5)
if self.rotate and self.axis == 0:
ax.invert_xaxis()
ax.yaxis.set_ticks_position('right')
ymax = min(map(min, self.Y)) + max(map(max, self.Y))
ax.set_ylim(0, ymax)
ax.invert_yaxis()
else:
xmax = min(map(min, self.X)) + max(map(max, self.X))
ax.set_xlim(0, xmax)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
plt.draw()
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean',
method='average', rotate=False, ax=None):
"""Draw a tree diagram of relationships within a matrix
Parameters
----------
data : pandas.DataFrame
Rectangular data
linkage : numpy.array, optional
Linkage matrix
axis : int, optional
Which axis to use to calculate linkage. 0 is rows, 1 is columns.
label : bool, optional
If True, label the dendrogram at leaves with column or row names
metric : str, optional
Distance metric. Anything valid for scipy.spatial.distance.pdist
method : str, optional
Linkage method to use. Anything valid for
scipy.cluster.hierarchy.linkage
rotate : bool, optional
When plotting the matrix, whether to rotate it 90 degrees
counter-clockwise, so the leaves face right
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis
Returns
-------
dendrogramplotter : _DendrogramPlotter
A Dendrogram plotter object.
Notes
-----
Access the reordered dendrogram indices with
dendrogramplotter.reordered_ind
"""
plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
metric=metric, method=method,
label=label, rotate=rotate)
if ax is None:
ax = plt.gca()
return plotter.plot(ax=ax)
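

# Hedged usage sketch, not from the seaborn sources: drawing a column dendrogram and
# reusing its leaf ordering to reorder the underlying frame. The data shape is an
# illustrative assumption; kept as a comment so nothing runs at import time.
#
#     import numpy as np
#     import pandas as pd
#     from seaborn.matrix import dendrogram
#
#     df = pd.DataFrame(np.random.RandomState(0).randn(8, 6))
#     den = dendrogram(df, axis=1, metric='euclidean', method='average')
#     reordered = df.iloc[:, den.reordered_ind]   # columns in dendrogram leaf order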
class ClusterGrid(Grid):
def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None):
"""Grid object for organizing clustered heatmap input on to axes"""
if isinstance(data, pd.DataFrame):
self.data = data
else:
self.data = pd.DataFrame(data)
self.data2d = self.format_data(self.data, pivot_kws, z_score,
standard_scale)
if figsize is None:
width, height = 10, 10
figsize = (width, height)
self.fig = plt.figure(figsize=figsize)
to_rgb = mpl.colors.colorConverter.to_rgb
if row_colors is not None:
row_colors = list(map(to_rgb, row_colors))
self.row_colors = row_colors
if col_colors is not None:
col_colors = list(map(to_rgb, col_colors))
self.col_colors = col_colors
width_ratios = self.dim_ratios(self.row_colors,
figsize=figsize,
axis=1)
height_ratios = self.dim_ratios(self.col_colors,
figsize=figsize,
axis=0)
nrows = 3 if self.col_colors is None else 4
ncols = 3 if self.row_colors is None else 4
self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01,
width_ratios=width_ratios,
height_ratios=height_ratios)
self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2],
axisbg="white")
self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1],
axisbg="white")
self.ax_row_colors = None
self.ax_col_colors = None
if self.row_colors is not None:
self.ax_row_colors = self.fig.add_subplot(
self.gs[nrows - 1, ncols - 2])
if self.col_colors is not None:
self.ax_col_colors = self.fig.add_subplot(
self.gs[nrows - 2, ncols - 1])
self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1])
# colorbar for the color scale, in the upper left corner
self.cax = self.fig.add_subplot(self.gs[0, 0])
self.dendrogram_row = None
self.dendrogram_col = None
def format_data(self, data, pivot_kws, z_score=None,
standard_scale=None):
"""Extract variables from data or use directly."""
# Either the data is already in 2d matrix format, or need to do a pivot
if pivot_kws is not None:
data2d = data.pivot(**pivot_kws)
else:
data2d = data
if z_score is not None and standard_scale is not None:
raise ValueError(
'Cannot perform both z-scoring and standard-scaling on data')
if z_score is not None:
data2d = self.z_score(data2d, z_score)
if standard_scale is not None:
data2d = self.standard_scale(data2d, standard_scale)
return data2d
@staticmethod
def z_score(data2d, axis=1):
"""Standarize the mean and variance of the data axis
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
normalized : pandas.DataFrame
Normalized data with a mean of 0 and variance of 1 across the
specified axis.
"""
if axis == 1:
z_scored = data2d
else:
z_scored = data2d.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
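# Illustrative example (hypothetical data): with the default axis=1, every
# column of the result has mean 0 and unit standard deviation.
#
#     df = pd.DataFrame(np.random.randn(6, 3))
#     z = ClusterGrid.z_score(df, axis=1)
#     z.mean()   # ~0 for each column
#     z.std()    # ~1 for each column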
@staticmethod
def standard_scale(data2d, axis=1):
"""Divide the data by the difference between the max and min
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
standardized : pandas.DataFrame
Data rescaled so that it ranges from 0 to 1 along the specified
axis: the minimum is subtracted, then values are divided by the range.
>>> import numpy as np
>>> d = np.arange(5, 8, 0.5)
>>> ClusterGrid.standard_scale(d)
array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
# Normalize these values to range from 0 to 1
if axis == 1:
standardized = data2d
else:
standardized = data2d.T
subtract = standardized.min()
standardized = (standardized - subtract) / (
standardized.max() - standardized.min())
if axis == 1:
return standardized
else:
return standardized.T
def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05):
"""Get the proportions of the figure taken up by each axes
"""
figdim = figsize[axis]
# Get resizing proportion of this figure for the dendrogram and
# colorbar, so only the heatmap gets bigger but the dendrogram stays
# the same size.
dendrogram = min(2. / figdim, .2)
# add the colorbar
colorbar_width = .8 * dendrogram
colorbar_height = .2 * dendrogram
if axis == 0:
ratios = [colorbar_width, colorbar_height]
else:
ratios = [colorbar_height, colorbar_width]
if side_colors is not None:
# Add room for the colors
ratios += [side_colors_ratio]
# Add the ratio for the heatmap itself
ratios += [.8]
return ratios
@staticmethod
def color_list_to_matrix_and_cmap(colors, ind, axis=0):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
These arguments can now be plotted using heatmap(matrix, cmap)
and the provided colors will be plotted.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
axis : int
Which axis this is labeling
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
cmap : matplotlib.colors.ListedColormap
"""
# check for nested lists/color palettes.
# Will fail if matplotlib color is list not tuple
if any(issubclass(type(x), list) for x in colors):
all_colors = set(itertools.chain(*colors))
n = len(colors)
m = len(colors[0])
else:
all_colors = set(colors)
n = 1
m = len(colors)
colors = [colors]
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix = np.array([color_to_value[c]
for color in colors for c in color])
shape = (n, m)
matrix = matrix.reshape(shape)
matrix = matrix[:, ind]
if axis == 0:
# row-side:
matrix = matrix.T
cmap = mpl.colors.ListedColormap(all_colors)
return matrix, cmap
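# Illustrative example (hypothetical colors): a flat list of three RGB tuples
# with a dendrogram ordering of [2, 0, 1] yields a 1x3 integer matrix whose
# entries index into the returned ListedColormap:
#
#     m, cm = ClusterGrid.color_list_to_matrix_and_cmap(
#         [(1., 0., 0.), (0., 1., 0.), (0., 0., 1.)], ind=[2, 0, 1], axis=1)
#     m.shape   # (1, 3)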
def savefig(self, *args, **kwargs):
if 'bbox_inches' not in kwargs:
kwargs['bbox_inches'] = 'tight'
self.fig.savefig(*args, **kwargs)
def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
row_linkage, col_linkage):
# Plot the row dendrogram
if row_cluster:
self.dendrogram_row = dendrogram(
self.data2d, metric=metric, method=method, label=False, axis=0,
ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage)
else:
self.ax_row_dendrogram.set_xticks([])
self.ax_row_dendrogram.set_yticks([])
# Plot the column dendrogram
if col_cluster:
self.dendrogram_col = dendrogram(
self.data2d, metric=metric, method=method, label=False,
axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage)
else:
self.ax_col_dendrogram.set_xticks([])
self.ax_col_dendrogram.set_yticks([])
despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
def plot_colors(self, **kws):
"""Plots color labels between the dendrogram and the heatmap
Parameters
----------
heatmap_kws : dict
Keyword arguments heatmap
"""
# Remove any custom colormap
kws = kws.copy()
kws.pop('cmap', None)
if self.row_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.row_colors, self.dendrogram_row.reordered_ind, axis=0)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(ax=self.ax_row_colors, left=True, bottom=True)
if self.col_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.col_colors, self.dendrogram_col.reordered_ind, axis=1)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(ax=self.ax_col_colors, left=True, bottom=True)
def plot_matrix(self, colorbar_kws, mask, **kws):
try:
xind = self.dendrogram_col.reordered_ind
except AttributeError:
xind = np.arange(self.data2d.shape[1])
try:
yind = self.dendrogram_row.reordered_ind
except AttributeError:
yind = np.arange(self.data2d.shape[0])
self.data2d = self.data2d.iloc[yind, xind]
heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax,
cbar_kws=colorbar_kws, mask=mask, **kws)
self.ax_heatmap.yaxis.set_ticks_position('right')
self.ax_heatmap.yaxis.set_label_position('right')
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
row_linkage, col_linkage, mask, **kws):
colorbar_kws = {} if colorbar_kws is None else colorbar_kws
self.plot_dendrograms(row_cluster, col_cluster, metric, method,
row_linkage=row_linkage, col_linkage=col_linkage)
self.plot_colors(**kws)
self.plot_matrix(colorbar_kws, mask, **kws)
return self
def clustermap(data, pivot_kws=None, method='average', metric='euclidean',
z_score=None, standard_scale=None, figsize=None, cbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None,
row_colors=None, col_colors=None, mask=None, **kwargs):
"""Plot a hierarchically clustered heatmap of a pandas DataFrame
Parameters
----------
data : pandas.DataFrame
Rectangular data for clustering. Cannot contain NAs.
pivot_kws : dict, optional
If `data` is a tidy dataframe, can provide keyword arguments for
pivot to create a rectangular dataframe.
method : str, optional
Linkage method to use for calculating clusters.
See scipy.cluster.hierarchy.linkage documentation for more information:
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
metric : str, optional
Distance metric to use for the data. See
scipy.spatial.distance.pdist documentation for more options
http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
z_score : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores
for the rows or the columns. Z scores are: z = (x - mean)/std, so
values in each row (column) will get the mean of the row (column)
subtracted, then divided by the standard deviation of the row (column).
This ensures that each row (column) has mean of 0 and variance of 1.
standard_scale : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to standardize that
dimension, meaning for each row or column, subtract the minimum and
divide each by its maximum.
figsize : tuple of two ints, optional
Size of the figure to create.
cbar_kws : dict, optional
Keyword arguments to pass to ``cbar_kws`` in ``heatmap``, e.g. to
add a label to the colorbar.
{row,col}_cluster : bool, optional
If True, cluster the {rows, columns}.
{row,col}_linkage : numpy.array, optional
Precomputed linkage matrix for the rows or columns. See
scipy.cluster.hierarchy.linkage for specific formats.
{row,col}_colors : list-like, optional
List of colors to label for either the rows or columns. Useful to
evaluate whether samples within a group are clustered together. Can
use nested lists for multiple color levels of labeling.
mask : boolean numpy.array, optional
A boolean array indicating where to mask the data so it is not
plotted on the heatmap. Only used for visualizing, not for calculating.
kwargs : other keyword arguments
All other keyword arguments are passed to ``sns.heatmap``
Returns
-------
clustergrid : ClusterGrid
A ClusterGrid instance.
Notes
-----
The returned object has a ``savefig`` method that should be used if you
want to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
``clustergrid.dendrogram_row.reordered_ind``
To access the reordered column indices, use:
``clustergrid.dendrogram_col.reordered_ind``
"""
plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
row_colors=row_colors, col_colors=col_colors,
z_score=z_score, standard_scale=standard_scale)
return plotter.plot(metric=metric, method=method,
colorbar_kws=cbar_kws,
row_cluster=row_cluster, col_cluster=col_cluster,
row_linkage=row_linkage, col_linkage=col_linkage,
mask=mask,
**kwargs)
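# A minimal usage sketch added for illustration: clustering random demo data
# end-to-end and saving the figure. Assumes matplotlib can render the figure.
if __name__ == "__main__":
    demo = pd.DataFrame(np.random.RandomState(0).randn(20, 8))
    cg = clustermap(demo, method="average", metric="euclidean")
    # Row/column orders chosen by the clustering:
    print(cg.dendrogram_row.reordered_ind)
    print(cg.dendrogram_col.reordered_ind)
    # The grid's own savefig avoids clipping the dendrograms.
    cg.savefig("clustermap_demo.png")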
|
|
#===============================================================================
# Copyright 2009 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
The classes in this module encode and decode posting information for a field.
The field format essentially determines what information is stored about each
occurrence of a term.
"""
from collections import defaultdict
from struct import pack, unpack, calcsize
from cStringIO import StringIO
from whoosh.analysis import unstopped
from whoosh.system import _INT_SIZE, _USHORT_SIZE, _FLOAT_SIZE
from whoosh.util import varint, read_varint, float_to_byte, byte_to_float
# Format base class
class Format(object):
"""Abstract base class representing a storage format for a field or vector.
Format objects are responsible for writing and reading the low-level
representation of a field. It controls what kind/level of information
to store about the indexed fields.
"""
posting_size = -1
textual = True
__inittypes__ = dict(analyzer=object, field_boost=float)
def __init__(self, analyzer, field_boost = 1.0, **options):
"""
:param analyzer: The analysis.Analyzer object to use to index this field.
See the analysis module for more information. If this value
is None, the field is not indexed/searchable.
:param field_boost: A constant boost factor to scale to the score
of all queries matching terms in this field.
"""
self.analyzer = analyzer
self.field_boost = field_boost
self.options = options
def __eq__(self, other):
return self.__class__ is other.__class__ and self.__dict__ == other.__dict__
def __repr__(self):
return "%s(%r, boost = %s)" % (self.__class__.__name__,
self.analyzer, self.field_boost)
def clean(self):
if self.analyzer and hasattr(self.analyzer, "clean"):
self.analyzer.clean()
def word_values(self, value, **kwargs):
"""Takes the text value to be indexed and yields a series of
("tokentext", frequency, valuestring) tuples, where frequency is the
number of times "tokentext" appeared in the value, and valuestring is
the encoded field-specific posting value for the token. For example, in a
Frequency format, the value string would be the same as frequency; in
a Positions format, the value string would encode a list of token
positions at which "tokentext" occurred.
:param value: The unicode text to index.
"""
raise NotImplementedError
def analyze(self, unicodestring, mode='', **kwargs):
"""Returns a :class:`whoosh.analysis.Token` iterator from the given
unicode string.
:param unicodestring: the string to analyze.
:param mode: a string indicating the purpose for which the unicode
string is being analyzed, i.e. 'index' or 'query'.
"""
if not self.analyzer:
raise Exception("%s format has no analyzer" % self.__class__)
return self.analyzer(unicodestring, mode=mode, **kwargs)
def encode(self, value):
"""Returns the given value encoded as a string.
"""
raise NotImplementedError
def supports(self, name):
"""Returns True if this format supports interpreting its posting
value as 'name' (e.g. "frequency" or "positions").
"""
return hasattr(self, "decode_" + name)
def decoder(self, name):
"""Returns the bound method for interpreting value as 'name',
where 'name' is for example "frequency" or "positions". This
object must have a corresponding Format.decode_<name>() method.
"""
return getattr(self, "decode_" + name)
def decode_as(self, astype, valuestring):
"""Interprets the encoded value string as 'astype', where 'astype' is
for example "frequency" or "positions". This object must have a
corresponding decode_<astype>() method.
"""
return self.decoder(astype)(valuestring)
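# Illustrative example of the supports()/decoder() protocol (using the
# Frequency format defined below; no analyzer is needed because encode() and
# decode_*() operate on already-tokenized data):
#
#     fmt = Frequency(analyzer=None)
#     fmt.supports("frequency")                  # True -> decode_frequency() exists
#     fmt.decoder("frequency")(fmt.encode(3))    # 3
#     fmt.decode_as("weight", fmt.encode(3))     # 3.0 (frequency * field_boost)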
# Concrete field classes
class Existence(Format):
"""Only indexes whether a given term occurred in a given document; it does
not store frequencies or positions. This is useful for fields that should be
searchable but not scorable, such as a file path.
Supports: frequency, weight (always reports frequency = 1).
"""
posting_size = 0
__inittypes__ = dict(analyzer=object, field_boost=float)
def __init__(self, analyzer, field_boost = 1.0, **options):
self.analyzer = analyzer
self.field_boost = field_boost
self.options = options
def word_values(self, value, **kwargs):
return ((w, 1, '') for w
in set(t.text for t in unstopped(self.analyzer(value, **kwargs))))
def encode(self, value):
return ''
def decode_frequency(self, valuestring):
return 1
def decode_weight(self, valuestring):
return self.field_boost
class Frequency(Format):
"""Stores frequency information for each posting.
Supports: frequency, weight.
"""
posting_size = _INT_SIZE
__inittypes__ = dict(analyzer=object, field_boost=float, boost_as_freq=bool)
def __init__(self, analyzer, field_boost = 1.0, boost_as_freq = False, **options):
"""
:param analyzer: The analysis.Analyzer object to use to index this field.
See the analysis module for more information. If this value
is None, the field is not indexed/searchable.
:param field_boost: A constant boost factor to scale to the score
of all queries matching terms in this field.
:param boost_as_freq: if True, take the integer value of each token's
boost attribute and use it as the token's frequency.
"""
self.analyzer = analyzer
self.field_boost = field_boost
self.boost_as_freq = boost_as_freq
self.options = options
def word_values(self, value, **kwargs):
seen = defaultdict(int)
if self.boost_as_freq:
for t in unstopped(self.analyzer(value, boosts = True, **kwargs)):
seen[t.text] += int(t.boost)
else:
for t in unstopped(self.analyzer(value, **kwargs)):
seen[t.text] += 1
encode = self.encode
return ((w, freq, encode(freq)) for w, freq in seen.iteritems())
def encode(self, freq):
return pack("!I", freq)
def decode_frequency(self, valuestring):
return unpack("!I", valuestring)[0]
def decode_weight(self, valuestring):
freq = unpack("!I", valuestring)[0]
return freq * self.field_boost
class DocBoosts(Frequency):
"""A Field that stores frequency and per-document boost information for each posting.
Supports: frequency, weight.
"""
posting_size = _INT_SIZE + 1
def word_values(self, value, doc_boost = 1.0, **kwargs):
seen = defaultdict(int)
for t in unstopped(self.analyzer(value, **kwargs)):
seen[t.text] += 1
encode = self.encode
return ((w, freq, encode((freq, doc_boost))) for w, freq in seen.iteritems())
def encode(self, freq_docboost):
freq, docboost = freq_docboost
return pack("!I", freq) + float_to_byte(docboost)
def decode_docboosts(self, valuestring):
freq = unpack("!I", valuestring[:_INT_SIZE])[0]
docboost = byte_to_float(valuestring[-1])
return (freq, docboost)
def decode_frequency(self, valuestring):
return unpack("!I", valuestring[0:_INT_SIZE])[0]
def decode_weight(self, valuestring):
freq = unpack("!I", valuestring[:_INT_SIZE])[0]
docboost = byte_to_float(valuestring[-1])
return freq * docboost * self.field_boost
# Vector formats
class Positions(Format):
"""A vector that stores position information in each posting, to allow phrase
searching and "near" queries.
Supports: frequency, weight, positions, position_boosts (always reports position
boost = 1.0).
"""
def word_values(self, value, start_pos = 0, **kwargs):
seen = defaultdict(list)
for t in unstopped(self.analyzer(value, positions = True, start_pos = start_pos, **kwargs)):
seen[t.text].append(start_pos + t.pos)
encode = self.encode
return ((w, len(poslist), encode(poslist)) for w, poslist in seen.iteritems())
def encode(self, positions):
# positions = [pos1, pos2, ...]
codes = []
base = 0
for pos in positions:
codes.append(varint(pos - base))
base = pos
return pack("!I", len(positions)) + "".join(codes)
def decode_positions(self, valuestring):
read = StringIO(valuestring).read
freq = unpack("!I", read(_INT_SIZE))[0]
position = 0
positions = []
for _ in xrange(freq):
position = read_varint(read) + position
positions.append(position)
return positions
def decode_frequency(self, valuestring):
return unpack("!I", valuestring[:_INT_SIZE])[0]
def decode_weight(self, valuestring):
return self.decode_frequency(valuestring) * self.field_boost
def decode_position_boosts(self, valuestring):
return [(pos, 1) for pos in self.decode_positions(valuestring)]
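# Illustrative round trip of the delta/varint scheme above: positions
# [1, 5, 20] are stored as the count (3) followed by the varint deltas
# 1, 4, 15, and decode_positions() undoes the delta coding:
#
#     fmt = Positions(analyzer=None)
#     fmt.decode_positions(fmt.encode([1, 5, 20]))   # [1, 5, 20]
#     fmt.decode_frequency(fmt.encode([1, 5, 20]))   # 3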
class Characters(Positions):
"""Stores token position and character start and end information
for each posting.
Supports: frequency, weight, positions, position_boosts (always reports position
boost = 1.0), characters.
"""
def word_values(self, value, start_pos = 0, start_char = 0, **kwargs):
seen = defaultdict(list)
for t in unstopped(self.analyzer(value, positions = True, chars = True,
start_pos = start_pos, start_char = start_char, **kwargs)):
seen[t.text].append((t.pos, start_char + t.startchar, start_char + t.endchar))
encode = self.encode
return ((w, len(ls), encode(ls)) for w, ls in seen.iteritems())
def encode(self, posns_chars):
# posns_chars = [(pos, startchar, endchar), ...]
codes = []
posbase = 0
charbase = 0
for pos, startchar, endchar in posns_chars:
codes.append(varint(pos - posbase))
posbase = pos
codes.extend((varint(startchar - charbase), varint(endchar - startchar)))
charbase = endchar
return pack("!I", len(posns_chars)) + "".join(codes)
def decode_characters(self, valuestring):
read = StringIO(valuestring).read
freq = unpack("!I", read(_INT_SIZE))[0]
position = 0
endchar = 0
posns_chars = []
for _ in xrange(freq):
position = read_varint(read) + position
startchar = endchar + read_varint(read)
endchar = startchar + read_varint(read)
posns_chars.append((position, startchar, endchar))
return posns_chars
def decode_positions(self, valuestring):
return [pos for pos, startchar, endchar
in self.decode_characters(valuestring)]
class PositionBoosts(Positions):
"""A format that stores positions and per-position boost information
in each posting.
Supports: frequency, weight, positions, position_boosts.
"""
def word_values(self, value, start_pos = 0, **kwargs):
seen = defaultdict(list)
for t in unstopped(self.analyzer(value, positions = True, boosts = True,
start_pos = start_pos, **kwargs)):
pos = t.pos
boost = t.boost
seen[t.text].append((pos, boost))
encode = self.encode
return ((w, len(poslist), encode(poslist)) for w, poslist in seen.iteritems())
def encode(self, posns_boosts):
# posns_boosts = [(pos, boost), ...]
codes = []
base = 0
summedboost = 0
for pos, boost in posns_boosts:
summedboost += boost
codes.extend((varint(pos - base), float_to_byte(boost)))
base = pos
return pack("!If", len(posns_boosts), summedboost) + "".join(codes)
def decode_position_boosts(self, valuestring):
f = StringIO(valuestring)
read = f.read
freq = unpack("!I", read(_INT_SIZE))[0]
# Skip summed boost
f.seek(_FLOAT_SIZE, 1)
position = 0
posns_boosts = []
for _ in xrange(freq):
position = read_varint(read) + position
boost = byte_to_float(read(1))
posns_boosts.append((position, boost))
return posns_boosts
def decode_positions(self, valuestring):
f = StringIO(valuestring)
read, seek = f.read, f.seek
freq = unpack("!I", read(_INT_SIZE))[0]
# Skip summed boost
seek(_FLOAT_SIZE, 1)
position = 0
positions = []
for _ in xrange(freq):
position = read_varint(read) + position
# Skip boost
seek(1, 1)
positions.append(position)
return positions
def decode_weight(self, valuestring):
freq, summedboost = unpack("!If", valuestring[:_INT_SIZE + _FLOAT_SIZE])
return freq * summedboost
class CharacterBoosts(Characters):
"""A format that stores positions, character start and end, and
per-position boost information in each posting.
Supports: frequency, weight, positions, position_boosts, characters,
character_boosts.
"""
def word_values(self, value, start_pos = 0, start_char = 0, **kwargs):
seen = defaultdict(list)
for t in unstopped(self.analyzer(value, positions = True, chars = True,
boosts = True,
start_pos = start_pos, start_char = start_char,
**kwargs)):
seen[t.text].append((t.pos,
start_char + t.startchar, start_char + t.endchar,
t.boost))
encode = self.encode
return ((w, len(poslist), encode(poslist)) for w, poslist in seen.iteritems())
def encode(self, posns_chars_boosts):
# posns_chars_boosts = [(pos, startchar, endchar, boost), ...]
codes = []
posbase = 0
charbase = 0
summedboost = 0
for pos, startchar, endchar, boost in posns_chars_boosts:
summedboost += boost
codes.append(varint(pos - posbase))
posbase = pos
codes.extend((varint(startchar - charbase),
varint(endchar - startchar),
float_to_byte(boost)))
charbase = endchar
return pack("!If", len(posns_chars_boosts), summedboost) + "".join(codes)
def decode_character_boosts(self, valuestring):
f = StringIO(valuestring)
read = f.read
freq = unpack("!I", read(_INT_SIZE))[0]
# Skip summed boost
f.seek(_FLOAT_SIZE, 1)
position = 0
endchar = 0
posns_chars = []
for _ in xrange(freq):
position = read_varint(read) + position
startchar = endchar + read_varint(read)
endchar = startchar + read_varint(read)
boost = byte_to_float(read(1))
posns_chars.append((position, startchar, endchar, boost))
return posns_chars
def decode_characters(self, valuestring):
return [(pos, startchar, endchar) for pos, startchar, endchar, boost
in self.decode_character_boosts(valuestring)]
def decode_position_boosts(self, valuestring):
return [(pos, boost) for pos, startchar, endchar, boost
in self.decode_character_boosts(valuestring)]
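if __name__ == "__main__":
    # Quick self-check of the character-offset encoding above (illustrative
    # values; no analyzer is required for the encode/decode round trip).
    _fmt = Characters(analyzer=None)
    _postings = [(0, 0, 4), (3, 10, 15)]
    assert _fmt.decode_characters(_fmt.encode(_postings)) == _postings
    assert _fmt.decode_positions(_fmt.encode(_postings)) == [0, 3]
    assert _fmt.decode_frequency(_fmt.encode(_postings)) == 2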
|
|
# ATDM Bayesian networks
# Jens Raaby, May 2013
"""
This file contains the code for Bayesian Networks with Binary Random Variables
Ancestral sampling and markov blanket sampling are implemented
"""
# We use the built in random generator and ordered dictionary libraries
import random
from collections import OrderedDict
class BN:
""" A simple Bayesian network class.
Stores nodes in a dictionary (str name -> BinaryRandomVariable)
Edges are encoded in the nodes themselves
"""
def __init__(self):
# This dictionary keeps nodes, ordered by insertion
self._nodes = OrderedDict()
# This dictionary keeps the ancestral ordering for the nodes
self.ordering = None
def add_node(self, name, parents, pt):
"""Creates a random variable and inserts as a node"""
if name in self._nodes:
raise Exception("A random variable already exists with the name (%s)" % name)
# we validate the parents, while adding references to list 'ps'
# ps must be a list so the order is maintained
ps = list()
for p in parents:
if p not in self._nodes:
raise Exception("Error adding %s: Parent %s does not exist" % (name,p))
else:
ps.append(self._nodes[p])
# create the node
n = BinaryRandomVariable(name,ps,pt)
# we add child references to the parents
for p in ps:
p.add_child(name)
# insert the node
self._nodes[name] = n
def get_binary_random_variable(self, name):
"""Returns the node for a given random variable"""
return self._nodes[name]
def observe_node(self,node,value):
"""Sets the value of the given node"""
self._nodes[node].observe(value)
def observe_nodes(self,node_values={}):
"""Sets several values to the given observed values.
Input is a dict from node name to observation (True or False)"""
for n,v in node_values.items():
self.observe_node(n,v)
def sample_node(self,node):
"""Samples the given node using a random number"""
return self._nodes[node].sample()
def joint_probability(self):
"""
Compute the joint probability that every node in the network is True.
This is done using the factorization
p(\mathbf{X}) = \prod_{i=1}^{N} p(X_i = True | pa_i = True)
"""
if self.ordering is None:
self.validate()
px = 1.0
# we iterate over the nodes in ancestral order
for k in self.ordering:
node = self._nodes[k]
if node.is_root():
# no ancestors involved
px *= node.p()
else:
# generate the probability conditions for all parents being true:
conditions = tuple([True for i in xrange(len(node.parents))])
# get the probability of this node given all parents sampled True
px *= node.p(conditions)
return px
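# Worked example (hypothetical numbers): for a root node R with p(R=True) = 0.2
# and a child W with p(W=True | R=True) = 0.9, the loop above gives
# p(R=True, W=True) = 0.2 * 0.9 = 0.18.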
def print_joint_probability(self):
"""Computes and prints the joint probability for the network"""
jp = self.joint_probability()
print "Joint probability: \n\tp(%s) = %s" % (','.join([n for n in self._nodes.keys()]), jp)
return jp
def ancestral_sample(self, selected=None):
"""Assigns values to all variables using ancestral sampling
If selected contains a list of nodes, then only their assignments will be returned."""
if self.ordering is None:
self.validate()
if selected is None:
for k in self.ordering:
node = self._nodes[k]
node.sample()
return [(name,n.sample()) for (name,n) in self._nodes.items() ]
else:
# if only interested in a subset of nodes,
# stop sampling process when they have been sampled
remaining = list(selected)
for k in self.ordering:
node = self._nodes[k]
node.sample()
if k in remaining:
remaining.remove(k)
# check if further sampling needed
if len(remaining) == 0:
return [(name, self.sample_node(name)) for name in selected ]
def print_ancestral_sample(self):
sampling = self.ancestral_sample()
print "Ancestral sample: \n \t %s" % '\n\t '.join(["%s: %s"%(n,v) for (n,v) in sampling])
return sampling
def print_multi_sample(self,N):
"""
Performs N ancestral samples and computes the frequencies of each state.
The results are printed (along with proportions of the total)
"""
stats = {}
for n in xrange(N):
sample = self.ancestral_sample()
count = stats.setdefault(tuple(sample),0)
stats[tuple(sample)] += 1
self.reset_sampling()
stats = OrderedDict(sorted(stats.items(), key=lambda x: x[1],reverse=True))
print "Frequencies after %s samples: \n\t" %N, "\t".join(["%80s: %4s (%4s)\n" % (sample,stats[sample],stats[sample]/float(N)) for sample in stats.keys() ])
return stats
def markov_blanket(self,node):
"""
Identifies the markov blanket of a given variable.
This is the set of the parents, the children and the co-parents
of the children.
"""
if self.ordering is None:
self.validate()
n = self._nodes[node] # raises exception for missing node
mb = set()
for p in n.parents:
mb.add(p.name)
for c in n.children:
mb.add(c)
cps = ([p.name for p in self._nodes[c].parents])
for cp in cps:
if not cp == node:
mb.add(cp)
return mb
def markov_blanket_sampling(self,node,assignments):
"""Generates a sample for a given variable given the
markov blanket assignments.
Takes the variable name, and the assignments to variables.
The blanket variables should be assigned, but this isn't checked
"""
n = self._nodes[node]
mb = self.markov_blanket(node)
# set the interest node to true for the sampling
n.observe(True)
# print n,n._set  # debug output
# Set the assignments
self.observe_nodes(assignments)
numerator = (n.get_probability())
for p in n.children:
# print p,": %s" % self.get_node(p).get_probability()
numerator *= self.get_node(p).get_probability()
# print numerator
# set the variable to false
n.observe(False)
p_false = 1 - n.get_probability()
# print "Prob false %s" % p_false
p_not = n.get_probability() * self.product([self.get_node(p).get_probability() for p in n.children])
# set the variable to true
n.observe(True)
# print "Prob true %s" % n.get_probability()
p = n.get_probability() * self.product([self.get_node(p).get_probability() for p in n.children])
denominator = p_not + p
p_n_given_mb = (numerator/denominator)
print "p(%s=True | %s) = %s" % (node,assignments, p_n_given_mb)
rp = random.uniform(0.0,1.0)
val = rp < p_n_given_mb
n.observe(val)
return val
def rejection_sampling(self,evidence={},N=100):
"""
If any variables are observed, then any samples
which do not agree are ignored until we find one that does
Note that if no variables are observed this is equivalent to
ancestral_sample()
evidence is the dictionary of assignments (can be empty)
N is the number of samples to generate
"""
print "Rejection sampling given evidence: \n\t"
print "\n".join(["%20s: %10s" % (v,a) for v,a in evidence.items()])
e_nodes = evidence.keys()
stats = {}
N_orig = N
N_attempts = 0
while N>0:
failed = False
self.reset_sampling()
s = self.ancestral_sample()
# verify against evidence:
for n in e_nodes:
samples = dict((x,y) for x, y in s)
if not samples[n] == evidence[n]:
failed = True
if not failed:
count = stats.setdefault(tuple(s),0)
stats[tuple(s)] += 1
N -= 1
N_attempts +=1
stats = OrderedDict(sorted(stats.items(), key=lambda x: x[1],reverse=True))
print "Rejected %s samples out of %s" %(N_attempts-N_orig,N_attempts)
print "Sample frequencies after %s samples: \n\t" %N_orig, "\t".join(["%80s: %4s (%4s)\n" % (sample,stats[sample],stats[sample]/float(N_orig)) for sample in stats.keys() ])
return stats
def reset_sampling(self):
"""
Removes all observations from the network
"""
for (name,rv) in self._nodes.items():
rv.reset()
def reset_node(self,node):
self._nodes[node].reset()
def print_ancestral_order(self):
print "Ancestral ordering: \n\t", self.ordering
# Utility functions
def number_nodes(self):
return len(self._nodes)
def validate(self):
"""
Sets the ancestral order for all the nodes
"""
if not self.ordering is None:
return
print "Setting ancestral order for network"
self.ordering = {}
nextID = 1
roots = []
for i in self._nodes:
if self._nodes[i].is_root():
roots.append(i)
self.ordering[i] = nextID
self._nodes[i].set_order(nextID)
nextID += 1
# order the next level of nodes
self.order_nodes(roots,nextID)
def order_nodes(self,parents,nextID):
""" Recursive method for setting ancestral order for
all the descendant nodes for the given parents"""
nextlevel = []
for n in parents:
for c in self._nodes[n].children:
# only assign once for each node
if not c in nextlevel:
nextlevel.append(c)
# order the nextlevel:
for p in nextlevel:
self.ordering[p] = nextID
self._nodes[p].set_order(nextID)
nextID += 1
# recursive call:
if len(nextlevel) > 0:
self.order_nodes(nextlevel,nextID)
def node_sampled(self,node):
"""
Return True if the given node has been sampled or observed
"""
return not self._nodes[node]._set is None
def get_node(self,node):
return self._nodes[node]
def product(self,numbers):
"""
"""
r = 1.0
for n in numbers:
r *= n
return r
def __str__(self):
return "BN (%s nodes)" % self.number_nodes()
class BinaryRandomVariable:
""" A BinaryRandomVariable is a random variable that can take 2 states:
- True or False - with some probability p(True)
The variable can have N parents, in which case the probability table (PT)
must have size 2^N. That is, you must enumerate the probability of this variable
being true, given all the combinations of the parent variables
"""
def __init__(self, name, parents=list(), pt={}):
self.name = name
self.parents = parents
# number of children can vary, so make it an array
self.children = []
# verify the PT dimensions
# Since this is a binary variable there are 2^N (N = num. parents)
if not (len(pt) == 2**len(parents) ):
raise Exception("Wrong size probability table for %s parents, should be %s" % (len(parents),2**len(parents)))
# store the probability table, set the initial state to indicate unsampled
self.pt = pt
self._set = None
def is_root(self):
return len(self.parents) == 0
def is_leaf(self):
return len(self.children) == 0
def set_order(self,order_index):
"""Stores the topographical order of this node locally
We assume that all probability tuples are ordered using this order field.
It's mainly for bookkeeping that we store this
"""
self._order = order_index
def observe(self,value):
"""
Fixes this node to the given observed value.
"""
if not isinstance(value,bool):
raise Exception("Binary variables can only be observed as True or False")
self._set = value
def p(self,conditions={}):
"""
Get the probability of this event, given conditions
(if this is a root node, then just returns p(true))
The conditions should be a tuple of truth values ordered by parents.
Note the order of the conditions is assumed to be the same as the order
used when creating the random variable.
"""
if self.is_root():
return self.pt[(True,)]
else:
# we do still have a risk that the parents were supplied out of order
return self.pt[conditions]
def sample(self):
"""Take a sample for this node. Generated using PRNG and ancestral sampling"""
# generate a random probability
rp = random.uniform(0.0,1.0)
# if the node was already sampled, we just return that value
if not self._set is None:
return self._set
if self.is_root():
self._set = rp < self.pt[(True,)]
return self._set
# when there are parents:
samples = [None for p in self.parents]
for i in xrange(len(self.parents)):
samples[i] = self.parents[i].sample()
# look up the probability based on the parents samples
conditions = tuple(samples)
self._set = rp < self.pt[conditions]
return self._set
def get_probability(self):
"""
Similar to sample(), but just returns probability based on parents and current set state
"""
if self.is_root():
return self.pt[(True,)]
# when there are parents:
samples = [None for p in self.parents]
for i in xrange(len(self.parents)):
samples[i] = self.parents[i].sample()
# look up the probability based on the parents samples
conditions = tuple(samples)
# if this variable is set, then return the prob:
if not self._set is None:
if self._set:
return self.pt[conditions]
else:
return 1-self.pt[conditions]
# otherwise just the prob that it is true given the ancestors:
return self.pt[conditions]
def reset(self):
"""Clear any sampled observation"""
self._set = None
def parents_orders(self):
"""Get the ancestral ordering for the parents.
Not currently used, but might be useful in future"""
return [p._order for p in self.parents]
def add_child(self, child):
"""Adds a child to the node. Never adds duplicates"""
if not child in set(self.children):
self.children.append(child)
def __str__(self):
if self.is_root():
return "Root: p(%s) = %s" % (self.name,self.pt[(True,)])
return "Node: p(%s | %s)" % (self.name, [p.name for p in self.parents])
|
|
"""
File: DaqInScan03.py
Library Call Demonstrated: mcculw.ul.daq_in_scan()
mcculw.ul.get_tc_values()
Purpose: Synchronously scans Analog channels, Digital ports,
and Thermocouple channels in the foreground.
Demonstration: Collects data on Analog Channel 4, FirstPortA,
CJCs 0 and 1, and Thermocouple channels 0 and 1.
Other Library Calls: mcculw.ul.win_buf_alloc()
mcculw.ul.win_buf_free()
mcculw.ul.d_config_port()
mcculw.ul.c_config_scan()
Special Requirements: Device must support mcculw.ul.daq_in_scan() and
temperature inputs.
Thermocouples must be wired to temperature channels
selected.
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from tkinter import messagebox
from enum import Enum
from ctypes import cast, POINTER, c_ushort
from mcculw import ul
from mcculw.enums import (DigitalPortType, ChannelType, ULRange, CounterMode,
CounterDebounceTime, CounterEdgeDetection,
DigitalIODirection, TempScale, ErrorCode,
CounterTickSize)
from mcculw.ul import ULError
from mcculw.device_info import DaqDeviceInfo
try:
from ui_examples_util import UIExample, show_ul_error
except ImportError:
from .ui_examples_util import UIExample, show_ul_error
class DaqInScan03(UIExample):
def __init__(self, master=None):
super(DaqInScan03, self).__init__(master)
# By default, the example detects all available devices and selects the
# first device listed.
# If use_device_detection is set to False, the board_num property needs
# to match the desired board number configured with Instacal.
use_device_detection = True
self.board_num = 0
self.num_chans = 6
self.chan_list = []
self.chan_type_list = []
self.gain_list = []
try:
if use_device_detection:
self.configure_first_detected_device()
self.device_info = DaqDeviceInfo(self.board_num)
if (self.device_info.supports_daq_input
and self.device_info.supports_temp_input):
self.init_scan_channel_info()
self.create_widgets()
else:
self.create_unsupported_widgets()
except ULError:
self.create_unsupported_widgets(True)
def init_scan_channel_info(self):
# For accurate thermocouple readings, the CJC channels and TC channels
# must be associated properly. The TC channels must immediately follow
# their associated CJCs in the channel list. Other channel types may be
# placed in the channel list as long as they do not fall between a CJC
# channel and its associated thermocouple channel.
# Add an analog input channel
self.chan_list.append(4)
self.chan_type_list.append(ChannelType.ANALOG)
self.gain_list.append(ULRange.BIP10VOLTS)
# Add a digital input channel
self.chan_list.append(DigitalPortType.FIRSTPORTA)
self.chan_type_list.append(ChannelType.DIGITAL8)
self.gain_list.append(ULRange.NOTUSED)
# Add a CJC channel
self.chan_list.append(0)
self.chan_type_list.append(ChannelType.CJC)
self.gain_list.append(ULRange.NOTUSED)
# Add a TC channel
self.chan_list.append(0)
self.chan_type_list.append(ChannelType.TC)
self.gain_list.append(ULRange.NOTUSED)
# Add a CJC channel
self.chan_list.append(1)
self.chan_type_list.append(ChannelType.CJC)
self.gain_list.append(ULRange.NOTUSED)
# Add a TC channel
self.chan_list.append(1)
self.chan_type_list.append(ChannelType.TC)
self.gain_list.append(ULRange.NOTUSED)
def start_scan(self):
rate = 100
points_per_channel = 10
total_count = points_per_channel * self.num_chans
# Allocate a buffer for the scan
memhandle = ul.win_buf_alloc(total_count)
# Check if the buffer was successfully allocated
if not memhandle:
messagebox.showerror("Error", "Failed to allocate memory")
self.start_button["state"] = tk.NORMAL
return
try:
# Configure the digital port for input
ul.d_config_port(self.board_num, DigitalPortType.FIRSTPORTA,
DigitalIODirection.IN)
# Configure the counter channel
ul.c_config_scan(self.board_num, 0, CounterMode.STOP_AT_MAX,
CounterDebounceTime.DEBOUNCE_NONE, 0,
CounterEdgeDetection.RISING_EDGE,
CounterTickSize.TICK20PT83ns, 0)
# Run the scan
ul.daq_in_scan(self.board_num, self.chan_list, self.chan_type_list,
self.gain_list, self.num_chans, rate, 0, total_count,
memhandle, 0)
# Convert the TC values (optional parameter omitted)
err, temp_data_array = ul.get_tc_values(
self.board_num, self.chan_list, self.chan_type_list,
self.num_chans, memhandle, 0, points_per_channel,
TempScale.CELSIUS)
if err == ErrorCode.OUTOFRANGE:
messagebox.showwarning("Warning",
"Temperature data is out of range")
# Cast the memhandle to a ctypes pointer
# Note: the ctypes array will only be valid until win_buf_free
# is called.
array = cast(memhandle, POINTER(c_ushort))
# Display the values
self.display_values(array, temp_data_array, total_count)
except ULError as e:
show_ul_error(e)
finally:
# Free the allocated memory
ul.win_buf_free(memhandle)
self.start_button["state"] = tk.NORMAL
def display_values(self, array, temp_data_array, total_count):
channel_text = {}
# Add a string to the dictionary for each channel (excluding CJC
# channels)
for chan_num in range(0, self.num_chans):
if self.chan_type_list[chan_num] != ChannelType.CJC:
channel_text[chan_num] = ""
# Add (up to) the first 10 values for each channel to the text
# (excluding CJC channels)
chan_num = 0
temp_array_index = 0
for data_index in range(0, min(self.num_chans * 10, total_count)):
if self.chan_type_list[chan_num] != ChannelType.CJC:
if self.chan_type_list[chan_num] == ChannelType.TC:
channel_text[chan_num] += str(
round(temp_data_array[temp_array_index], 3)) + "\n"
temp_array_index += 1
else:
channel_text[chan_num] += str(array[data_index]) + "\n"
if chan_num == self.num_chans - 1:
chan_num = 0
else:
chan_num += 1
# Update the labels for each channel
for chan_num in channel_text:
self.data_labels[chan_num]["text"] = channel_text[chan_num]
def start(self):
self.start_button["state"] = tk.DISABLED
self.start_scan()
def create_widgets(self):
'''Create the tkinter UI'''
self.device_label = tk.Label(self)
self.device_label.pack(fill=tk.NONE, anchor=tk.NW)
self.device_label["text"] = ('Board Number ' + str(self.board_num)
+ ": " + self.device_info.product_name
+ " (" + self.device_info.unique_id + ")")
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
self.results_group = tk.LabelFrame(
self, text="Results", padx=3, pady=3)
self.results_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)
self.data_frame = tk.Frame(self.results_group)
self.data_frame.grid()
chan_header_label = tk.Label(
self.data_frame, justify=tk.LEFT, padx=3)
chan_header_label["text"] = "Channel:"
chan_header_label.grid(row=0, column=0)
type_header_label = tk.Label(
self.data_frame, justify=tk.LEFT, padx=3)
type_header_label["text"] = "Type:"
type_header_label.grid(row=1, column=0)
range_header_label = tk.Label(
self.data_frame, justify=tk.LEFT, padx=3)
range_header_label["text"] = "Range:"
range_header_label.grid(row=2, column=0)
self.data_labels = {}
column = 0
for chan_num in range(0, self.num_chans):
# Don't display the CJC channels
if self.chan_type_list[chan_num] != ChannelType.CJC:
chan_label = tk.Label(
self.data_frame, justify=tk.LEFT, padx=3)
chan_num_item = self.chan_list[chan_num]
if isinstance(chan_num_item, Enum):
chan_label["text"] = self.chan_list[chan_num].name
else:
chan_label["text"] = str(self.chan_list[chan_num])
chan_label.grid(row=0, column=column)
type_label = tk.Label(
self.data_frame, justify=tk.LEFT, padx=3)
type_label["text"] = self.chan_type_list[chan_num].name
type_label.grid(row=1, column=column)
range_label = tk.Label(
self.data_frame, justify=tk.LEFT, padx=3)
range_label["text"] = self.gain_list[chan_num].name
range_label.grid(row=2, column=column)
data_label = tk.Label(
self.data_frame, justify=tk.LEFT, padx=3)
data_label.grid(row=3, column=column)
self.data_labels[chan_num] = data_label
column += 1
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
self.start_button = tk.Button(button_frame)
self.start_button["text"] = "Start"
self.start_button["command"] = self.start
self.start_button.grid(row=0, column=0, padx=3, pady=3)
quit_button = tk.Button(button_frame)
quit_button["text"] = "Quit"
quit_button["command"] = self.master.destroy
quit_button.grid(row=0, column=1, padx=3, pady=3)
if __name__ == "__main__":
# Start the example
DaqInScan03(master=tk.Tk()).mainloop()
|
|
import numpy as np
import matplotlib.pyplot as plt; plt.ioff()
import copy
from .class_utils import *
from .utils import *
from astropy.cosmology import Planck15
import astropy.constants as co
c = co.c.value # speed of light, in m/s
G = co.G.value # gravitational constant in SI units
Msun = co.M_sun.value # solar mass, in kg
Mpc = 1e6*co.pc.value # 1 Mpc, in m
arcsec2rad = np.pi/(180.*3600.)
rad2arcsec = 3600.*180./np.pi
deg2rad = np.pi/180.
rad2deg = 180./np.pi
__all__ = ['LensRayTrace','GenerateLensingGrid','thetaE','get_caustics','CausticsSIE']
def LensRayTrace(xim,yim,lens,Dd,Ds,Dds):
"""
Wrapper to pass off lensing calculations to any number of functions
defined below, accumulating lensing offsets from multiple lenses
and shear as we go.
"""
# Ensure lens is a list, for convenience
lens = list(np.array([lens]).flatten())
ximage = xim.copy()
yimage = yim.copy()
for i,ilens in enumerate(lens):
if ilens.__class__.__name__ == 'SIELens': ilens.deflect(xim,yim,Dd,Ds,Dds)
elif ilens.__class__.__name__ == 'ExternalShear': ilens.deflect(xim,yim,lens[0])
ximage += ilens.deflected_x; yimage += ilens.deflected_y
return ximage,yimage
def GenerateLensingGrid(data=None,xmax=None,emissionbox=[-5,5,-5,5],fieldres=None,emitres=None):
"""
Routine to generate two grids for lensing. The first will be a lower-resolution
grid with resolution determined by fieldres and size determined
by xmax. The second is a much higher resolution grid which will be used for
the lensing itself, with resolution determined by emitres and size
determined from emissionbox - i.e., emissionbox should contain the coordinates
which conservatively encompass the real emission, so we only have to lens that part
of the field at high resolution.
Since we're going to be FFT'ing with these coordinates, the resolution isn't
directly settable. For the low-res full-field map, it is instead set to the next-higher
power of 2 from what would be expected from having ~4 resolution elements across
the synthesized beam.
Inputs:
data:
A Visdata object, used to determine the resolutions of
the two grids (based on the image size or maximum uvdistance in the dataset)
xmax:
Field size for the low-resolution grid in arcsec, which will extend from
(-xmax,-xmax) to (+xmax,+xmax), e.g. (-30,-30) to (+30,+30)arcsec. Should be
at least a bit bigger than the primary beam. Not needed for images.
emissionbox:
A 1x4 list of [xmin,xmax,ymin,ymax] defining a box (in arcsec) which contains
the source emission. Coordinates should be given in arcsec relative to the
pointing/image center.
fieldres,emitres:
Resolutions of the coarse, full-field and fine (lensed) field, in arcsec.
If not given, suitable values will be calculated from the visibilities.
fieldres is unnecessary for images.
Returns:
If there are any Visdata objects in the datasets, returns:
xmapfield,ymapfield:
2xN matrices containing x and y coordinates for the full-field, lower-resolution
grid, in arcsec.
xmapemission,ymapemission:
2xN matrices containing x and y coordinates for the smaller, very high resolution
grid, in arcsec.
indices:
A [4x1] array containing the indices of xmapfield,ymapfield which overlap with
the high resolution grid.
"""
# Factors higher-resolution than (1/2*max(uvdist)) to make the field and emission grids
Nover_field = 4.
Nover_emission = 8.
# Allow multiple visdata objects to be passed, pick the highest resolution point of all
uvmax = 0.
try:
for vis in data:
uvmax = max(uvmax,vis.uvdist.max())
except TypeError:
uvmax = data.uvdist.max()
# Calculate resolutions of the grids
if fieldres is None: fieldres = (2*Nover_field*uvmax)**-1.
else: fieldres *= arcsec2rad
if emitres is None: emitres = (2*Nover_emission*uvmax)**-1.
else: emitres *= arcsec2rad
# Calculate the field grid size as a power of 2.
Nfield = 2**np.ceil(np.log2(2*np.abs(xmax)*arcsec2rad/fieldres))
# Calculate the grid coordinates for the larger field.
fieldcoords = np.linspace(-np.abs(xmax),np.abs(xmax),int(Nfield))
xmapfield,ymapfield = np.meshgrid(fieldcoords,fieldcoords)
# Calculate the indices where the high-resolution lensing grid meets the larger field grid
indices = np.round(np.interp(np.asarray(emissionbox),fieldcoords,np.arange(Nfield)))
indices = [int(ind) for ind in indices] # cast to int's for newer numpy versions
# Calculate the grid coordinates for the high-res lensing grid; grids meet at indices. Some pixel-shifting reqd.
Nemx = 1 + np.abs(indices[1]-indices[0])*np.ceil((fieldcoords[1]-fieldcoords[0])/(2*emitres*rad2arcsec))
Nemy = 1 + np.abs(indices[3]-indices[2])*np.ceil((fieldcoords[1]-fieldcoords[0])/(2*emitres*rad2arcsec))
xemcoords = np.linspace(fieldcoords[indices[0]],fieldcoords[indices[1]],int(Nemx))
yemcoords = np.linspace(fieldcoords[indices[2]],fieldcoords[indices[3]],int(Nemy))
xmapemission,ymapemission = np.meshgrid(xemcoords,yemcoords)
xmapemission -= (xmapemission[0,1]-xmapemission[0,0])
ymapemission -= abs((ymapemission[1,0]-ymapemission[0,0]))
return xmapfield,ymapfield,xmapemission,ymapemission,indices
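# Worked example (hypothetical numbers): with xmax=30 arcsec and an explicit
# fieldres of 0.1 arcsec, 2*xmax/fieldres = 600, so Nfield is rounded up to the
# next power of two, 1024, and fieldcoords spans -30..+30 arcsec in 1024 steps.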
def thetaE(ML,zL,zS,cosmo=Planck15):
"""
Calculate the Einstein radius in arcsec of a lens of mass ML,
assuming redshifts zL and zS. If cosmo is not given, Planck15
is assumed. ML is in solar masses.
"""
Dd = cosmo.angular_diameter_distance(zL).value # in Mpc
Ds = cosmo.angular_diameter_distance(zS).value
Dds= cosmo.angular_diameter_distance_z1z2(zL,zS).value
thE = np.sqrt((4*G*ML*Msun*Dds) / (c**2 * Dd*Ds*Mpc)) * rad2arcsec
return thE
def get_caustics(lens,Dd,Ds,Dds,highresbox=[-2.,2.,-2.,2.],numres=0.01):
"""
Routine to calculate the locations for the lensing caustics.
If lens is either a single SIELens or a [SIELens,ExternalShear],
we calculate the caustics analytically, otherwise it has to be
a numerical calculation.
Inputs:
lens:
Either a single SIELens object, or a list of lens/shear
objects. If a single SIELens or one lens and shear, we
calculate the caustic locations analytically. If there's
more than one lens, we calculate the caustics numerically.
In this case you may want to play with the highresbox
and numres parameters, which affect how precisely the
caustics are calculated.
Dd,Ds,Dds:
Angular diameter distances to the lens, source, and lens-source,
respectively.
highresbox:
List of four coordinates, [xmin, xmax, ymin, ymax], within
which the caustics lie. You want this box to fully contain
the caustics. A good rule of thumb would be to pad the
positions of your lenses with the Einstein radius of the most
massive lens.
numres:
Resolution (in arcsec) of the highresbox above. A smaller
number here will make the caustics look nicer, because there
are more points to connect to make the caustics. This matters
most for the outer (ellipse-shaped, usually) caustic.
Returns:
2xN list:
Arrays containing the x and y coordinates for the caustics that
exist, with x and y in arcsec under the same convention as the
rest of the code (+y = north, +x = east). You can plot them with,
e.g.,
Standard matplotlib axis object:
for caustic in caustics:
ax.plot(caustic[:,0],caustic[:,1],ls='-',marker='',lw=1)
aplpy FITSFigure for fits image plotting:
ax = aplpy.FITSFigure('myimage.fits')
myfitshead = astropy.fits.open('myimage.fits')[0].header
ref_ra, ref_dec = myfitshead['CRVAL1'], myfitshead['CRVAL2']
for i in range(caustics.shape[0]):
ax.show_lines([np.vstack([caustics[:,0]+ref_ra,caustics[:,1]+ref_dec])],color='k',lw=1)
"""
# Figure out if we can do this analytically
lens = list(np.array([lens]).flatten())
for l in lens: l._altered = True
lens = [copy.deepcopy(l) for l in lens]
whichlens = [isinstance(l,SIELens) for l in lens]
whichshear = [isinstance(l,ExternalShear) for l in lens]
if sum(whichlens) == 1:
if sum(whichshear) == 1:
return CausticsSIE(lens[0],Dd,Ds,Dds,lens[1])
else:
return CausticsSIE(lens[0],Dd,Ds,Dds,Shear=None)
else: # we calculate them numerically
# first try to figure out where the caustics are going to be
# based on position & einstein radii
cximage = np.arange(highresbox[0],highresbox[1],numres)
cyimage = np.arange(highresbox[2],highresbox[3],numres)
cximage, cyimage = np.meshgrid(cximage,cyimage)
xsource,ysource = LensRayTrace(cximage,cyimage,lens,Dd,Ds,Dds)
jxy, jxx = np.gradient(xsource); jyy, jyx = np.gradient(ysource)
A = jxx*jyy - jxy*jyx
# it's pretty dumb that we have to do this...
tmpfig = plt.figure(); dummyax = tmpfig.add_subplot(111)
cset = dummyax.contour(xsource,ysource,A,levels=[0.])
plt.close(tmpfig)
contours = cset.collections[0].get_paths()
caustics = []
for contour in contours:
xcon,ycon = contour.vertices[:,0], contour.vertices[:,1]
caustic = np.vstack([xcon,ycon]).T
caustics.append(caustic)
for l in lens: l._altered = True
return caustics
def CausticsSIE(SIELens,Dd,Ds,Dds,Shear=None):
"""
Routine to calculate and return the analytical solutions for the caustics
of an SIE Lens, following Kormann+94.
Inputs:
SIELens:
An SIELens object for which to calculate the caustics.
Dd,Ds,Dds:
Angular diameter distances to the lens, source, and lens-source, respectively.
Shear:
An ExternalShear object describing the shear of the lens.
Returns:
2xN list:
Arrays containing the x and y coordinates for the caustics that exist (i.e.,
will have [[xr,yr]] for the radial caustic only if lens ellipticity==0, otherwise
will have [[xr,yr],[xt,yt]] for radial+diamond caustics)
"""
# Following Kormann+ 1994 for the lensing. Easier to work with axis ratio than ellipticity
f = 1. - SIELens.e['value']
fprime = np.sqrt(1. - f**2.)
# K+94 parameterize lens in terms of LOS velocity dispersion; calculate here in m/s
sigma_lens = ((SIELens.M['value']*Ds*G*Msun*c**2.)/(4.*np.pi**2. * Dd*Dds*Mpc))**(1./4.)
# Einstein radius, for normalizing the size of the caustics, b in notation of Keeton+00
b = 4 * np.pi * (sigma_lens/c)**2. * (Dds/Ds) * rad2arcsec
# Caustics calculated over a full 0,2pi angle range
phi = np.linspace(0,2*np.pi,2000)
# K+94, eq 21c; needed for diamond caustic
Delta = np.sqrt(np.cos(phi)**2. + f**2. * np.sin(phi)**2.)
if ((Shear is None) or (np.isclose(Shear.shear['value'],0.))):
# Need to account for when ellipticity=0, as caustic equations have cancelling infinities
# In that case, Delta==1 and there's only one (radial and circular) caustic
if np.isclose(f,1.):
xr,yr = -b*np.cos(phi)+SIELens.x['value'], -b*np.sin(phi)+SIELens.y['value']
caustic = np.atleast_3d([xr,yr])
return caustic.reshape(caustic.shape[2],caustic.shape[0],caustic.shape[1])
else:
# Calculate the radial caustic coordinates
xr = (b*np.sqrt(f)/fprime)*np.arcsinh(np.cos(phi)*fprime/f)
yr = (-b*np.sqrt(f)/fprime)*np.arcsin(np.sin(phi)*fprime)
# Now rotate & shift the caustic to match the PA & loc of the lens
r,th = cart2pol(xr,yr)
xr,yr = pol2cart(r,th+SIELens.PA['value']*deg2rad)
xr += SIELens.x['value']
yr += SIELens.y['value']
# Calculate the tangential caustic coordinates
xt = b*(((np.sqrt(f)/Delta) * np.cos(phi)) - ((np.sqrt(f)/fprime)*np.arcsinh(fprime/f * np.cos(phi))))
yt = -b*(((np.sqrt(f)/Delta) * np.sin(phi)) - ((np.sqrt(f)/fprime)*np.arcsin(fprime * np.sin(phi))))
# ... and rotate it to match the lens
r,th = cart2pol(xt,yt)
xt,yt = pol2cart(r,th+SIELens.PA['value']*deg2rad)
xt += SIELens.x['value']
yt += SIELens.y['value']
return [np.array([xr,yr]).T,np.array([xt,yt]).T]
else: # Blerg, complicated expressions... Keeton+00, but at least radial pseudo-caustic doesn't depend on shear
s, sa = Shear.shear['value'], (Shear.shearangle['value']-SIELens.PA['value'])*deg2rad
if np.isclose(f,1.):
rcrit = b * (1.+s*np.cos(2*(phi-sa)))/(1.-s**2.)
xr = -b*np.cos(phi) + SIELens.y['value']
yr = b*np.sin(phi) - SIELens.x['value']
xt = (np.cos(phi) + s*np.cos(phi-2*sa))*rcrit + xr - SIELens.y['value']
yt = (np.sin(-phi) - s*np.sin(-phi+2*sa))*rcrit + yr + SIELens.x['value']
r,th = cart2pol(yt,xt)
xt,yt = pol2cart(r,th+SIELens.PA['value']*deg2rad)
xt += SIELens.x['value']
yt += SIELens.y['value']
r,th = cart2pol(xr,yr)
xr,yr = pol2cart(r,th+SIELens.PA['value']*deg2rad)
return [np.array([xr,yr]).T,np.array([xt,yt]).T]
else:
rcrit = np.sqrt(2.*f)*b*(1.+s*np.cos(2.*(phi-sa))) / ((1.-s**2.)*np.sqrt((1+f**2.) - (1-f**2.)*np.cos(2*phi)))
xi = np.sqrt((2*(1-f**2.)) / ((1+f**2.)-(1-f**2.)*np.cos(2*phi)))
xr = -(b*np.sqrt(f)/fprime)*np.arctanh(xi*np.sin(phi))
yr = (b*np.sqrt(f)/fprime)*np.arctan(xi*np.cos(phi))
xt = (np.sin(phi)-s*np.sin(phi-2*sa))*rcrit + xr
yt = (np.cos(np.pi-phi)+s*np.cos(np.pi-phi+2*sa))*rcrit + yr
r,th = cart2pol(xt,yt)
xt,yt = pol2cart(r,th+SIELens.PA['value']*deg2rad)
xt += SIELens.x['value']
yt += SIELens.y['value']
r,th = cart2pol(xr,yr)
xr,yr = pol2cart(r,th+SIELens.PA['value']*deg2rad)
xr += SIELens.x['value']
yr += SIELens.y['value']
return [np.array([xr,yr]).T,np.array([xt,yt]).T]
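# Example usage (a minimal sketch, not part of the original module): for an
# elliptical lens, CausticsSIE returns a list of Nx2 arrays of (x, y) points,
# so the caustics could be overplotted with something like
#
#   caustics = CausticsSIE(lens, Dd, Ds, Dds, Shear=None)
#   for caustic in caustics:
#       plt.plot(caustic[:, 0], caustic[:, 1])
#
# where 'lens', 'Dd', 'Ds', and 'Dds' are assumed to have been built elsewhere
# with this module's SIELens object and angular diameter distances.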
# -*- coding: utf-8 -*-
### BEGIN LICENSE
# Copyright (C) 2010-2012 Kevin Mehall <[email protected]>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
# edited j.brian 10-7-16
import sys
import os, time
import gobject
gobject.threads_init()
# optional Launchpad integration
# this shouldn't crash if not found as it is simply used for bug reporting
try:
    import LaunchpadIntegration
    launchpad_available = True
except ImportError:
    launchpad_available = False
import pygst
pygst.require('0.10')
import gst
import cgi
import math
import webbrowser
import os
import urllib  # urllib.getproxies() is used in Pithos.get_proxy()
import urllib2
import json
from util import *
from gobject_worker import GObjectWorker
from plugin import load_plugins
from pandora import *
from radio import vlc
from radio.config import preferences
#from beaglebone import Beaglebone
def radioRootPath():
if sys.platform == "darwin":
return os.path.expanduser("~/Library/Application Support/radio/img")
elif sys.platform.startswith("win"):
return os.path.join(os.environ['APPDATA'], "radio/img")
else:
return os.path.expanduser("~/.radio/img")
def openBrowser(url):
print "Opening %s" % url
webbrowser.open(url)
try:
os.wait() # workaround for http://bugs.python.org/issue5993
except:
pass
def get_album_art(url, song, index):
if not os.path.isdir(os.path.realpath('../Radio/static/cache')):
os.mkdir(os.path.realpath('../Radio/static/cache'))
outfile = open(os.path.join(os.path.realpath('../Radio/static/cache'), str(index) + ".jpg"), "wb")
outfile.write(urllib2.urlopen(url).read())
outfile.close()
art = str(index) + ".jpg"
return art, song, index
class Pithos(object):
def __init__(self, radiologger):
self.radiologger = radiologger
self.loop = gobject.MainLoop()
self.prefs = preferences.Prefs()
self.default_client_id = "android-generic"
self.default_one_client_id = "pandora-one"
self.default_album_art = None
self.song_thumbnail = None
self.songChanged = False
#global launchpad_available
#if False and launchpad_available: # Disable this
# see https://wiki.ubuntu.com/UbuntuDevelopment/Internationalisation/Coding for more information
# about LaunchpadIntegration
# helpmenu = self.builder.get_object('menu_options')
# if helpmenu:
# LaunchpadIntegration.set_sourcepackagename('pithos')
# LaunchpadIntegration.add_items(helpmenu, 0, False, True)
# else:
# launchpad_available = False
self.init_core()
self.beaglebone = Beaglebone(self, self.radiologger, self.player)
self.beaglebone.greenOn()
self.plugins = {}
load_plugins()
self.stations_model = []
self.songs_model = []
self.pandora = make_pandora(self.radiologger)
self.set_proxy()
self.set_audio_quality()
self.pandora_connect()
def init_core(self):
self.player = gst.element_factory_make("playbin2", "player")
#self.player.props.flags |= (1 << 7) # enable progressive download (GST_PLAY_FLAG_DOWNLOAD)
self.time_format = gst.Format(gst.FORMAT_TIME)
self.bus = self.player.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self.on_gst_eos)
self.bus.connect("message::buffering", self.on_gst_buffering)
self.bus.connect("message::error", self.on_gst_error)
self.player.connect("notify::volume", self.on_gst_volume)
self.player.connect("notify::source", self.on_gst_source)
self.stations_dlg = None
self.playing = False
self.current_song_index = None
self.current_station = None
self.current_station_name = None
self.current_station_id = self.prefs.getLastStationId()
self.buffer_percent = 100
self.auto_retrying_auth = False
self.have_stations = False
self.playcount = 0
self.gstreamer_errorcount_1 = 0
self.gstreamer_errorcount_2 = 0
self.gstreamer_error = ''
self.waiting_for_playlist = False
self.start_new_playlist = False
self.worker = GObjectWorker(self.radiologger)
self.songWorker = GObjectWorker(self.radiologger)
self.art_worker = GObjectWorker(self.radiologger)
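    # worker_run() / song_worker_run() below dispatch a Pandora API call onto a
    # background GObjectWorker: 'fn' may be a callable or the name of a method on
    # self.pandora, 'callback' fires on success, and auth/API errors trigger an
    # automatic reconnect and retry. For example (as used further down in this
    # file):
    #
    #   self.worker_run(self.pandora.get_stations, (), self.process_stations,
    #                   "Refreshing stations...")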
def worker_run(self, fn, args=(), callback=None, message=None, context='net'):
if context and message:
self.radiologger.log(message, "INFO")
if isinstance(fn, str):
fn = getattr(self.pandora, fn)
def cb(v=None):
if callback:
if v is None:
callback()
else:
callback(v)
def eb(e):
def retry_cb():
self.auto_retrying_auth = False
if fn is not self.pandora.connect:
self.worker_run(fn, args, callback, message, context)
if isinstance(e, PandoraAuthTokenInvalid) and not self.auto_retrying_auth:
self.auto_retrying_auth = True
self.radiologger.log("Automatic reconnect after invalid auth token", "INFO")
self.pandora_connect("Reconnecting to pandora...", retry_cb)
elif isinstance(e, PandoraAPIVersionError):
self.api_update_dialog()
elif isinstance(e, PandoraError):
self.error_dialog(e.message, retry_cb, submsg=e.submsg)
else:
self.radiologger.log(e.traceback, "WARNING")
self.worker.send(fn, args, cb, eb)
def song_worker_run(self, fn, args=(), callback=None, message=None, context='net'):
if context and message:
self.radiologger.log(message, "INFO")
if isinstance(fn, str):
fn = getattr(self.pandora, fn)
def cb(v=None):
if callback:
if v is None:
callback()
else:
callback(v)
def eb(e):
def retry_cb():
self.auto_retrying_auth = False
if fn is not self.pandora.connect:
self.worker_run(fn, args, callback, message, context)
if isinstance(e, PandoraAuthTokenInvalid) and not self.auto_retrying_auth:
self.auto_retrying_auth = True
self.radiologger.log("Automatic reconnect after invalid auth token", "INFO")
self.pandora_connect("Reconnecting to pandora...", retry_cb)
elif isinstance(e, PandoraAPIVersionError):
self.api_update_dialog()
elif isinstance(e, PandoraError):
self.error_dialog(e.message, retry_cb, submsg=e.submsg)
else:
self.radiologger.log(e.traceback, "WARNING")
self.songWorker.send(fn, args, cb, eb)
def get_proxy(self):
""" Get HTTP proxy, first trying preferences then system proxy """
proxy = self.prefs.getPandoraProxy()
if proxy != "":
return proxy
system_proxies = urllib.getproxies()
if 'http' in system_proxies:
return system_proxies['http']
return None
def set_proxy(self):
# proxy preference is used for all Pithos HTTP traffic
# control proxy preference is used only for Pandora traffic and
# overrides proxy
#
# If neither option is set, urllib2.build_opener uses urllib.getproxies()
# by default
handlers = []
global_proxy = self.prefs.getPandoraProxy()
if global_proxy != "":
handlers.append(urllib2.ProxyHandler({'http': global_proxy, 'https': global_proxy}))
global_opener = urllib2.build_opener(*handlers)
urllib2.install_opener(global_opener)
control_opener = global_opener
control_proxy = self.prefs.getPandoraControlProxy()
if control_proxy != "":
control_opener = urllib2.build_opener(urllib2.ProxyHandler({'http': control_proxy, 'https': control_proxy}))
self.worker_run('set_url_opener', (control_opener,))
def set_audio_quality(self):
self.worker_run('set_audio_quality', (self.prefs.getPandoraAudioQuality(),))
def pandora_connect(self, message="Logging in to pandora...", callback=None):
pandoraOne = self.prefs.getPandoraOne()
if pandoraOne != "off" and pandoraOne != "False":
client = self.prefs.getPandoraClient(self.default_one_client_id)
else:
client = self.prefs.getPandoraClient(self.default_client_id)
# Allow user to override client settings
#force_client = self.prefs.getPandoraForceClient()
#if force_client in client_keys:
# client = client_keys[force_client]
#elif force_client and force_client[0] == '{':
# try:
# client = json.loads(force_client)
# except:
# logging.error("Could not parse force_client json")
args = (
client[0],
self.prefs.getPandoraUsername(),
self.prefs.getPandoraPassword(),
)
def pandora_ready(*ignore):
self.radiologger.log("Pandora connected", "INFO")
self.beaglebone.greenOff()
self.process_stations(self)
if callback:
callback()
self.worker_run('connect', args, pandora_ready, message, 'login')
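    # process_stations() rebuilds stations_model from self.pandora.stations:
    # QuickMix entries first, then a (None, 'sep') separator row, then the
    # remaining stations; if the saved station id is found it is restored,
    # otherwise the first entry is selected via station_changed().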
def process_stations(self, *ignore):
self.stations_model = []
self.current_station = None
self.current_station_name = None
selected = None
for i in self.pandora.stations:
self.beaglebone.greenOn()
if i.isQuickMix and i.isCreator:
self.stations_model.append((i, "QuickMix"))
self.beaglebone.greenOff()
self.stations_model.append((None, 'sep'))
for i in self.pandora.stations:
self.beaglebone.greenOn()
if not (i.isQuickMix and i.isCreator):
self.stations_model.append((i, i.name))
if i.id == self.current_station_id:
self.radiologger.log("Restoring saved station: id = %s" % (i.id), "INFO")
selected = i
self.current_station_name = i.name
self.beaglebone.greenOff()
if not selected:
selected = self.stations_model[0][0]
self.current_station_name = self.stations_model[0][1]
self.station_changed(selected, reconnecting=self.have_stations)
self.have_stations = True
def getStations(self):
stations = False
if self.have_stations:
return self.stations_model
return stations
def getVolume(self):
return self.player.get_property('volume')
def getCurrentStation(self):
return self.current_station_name
def getSongArt(self):
return self.current_song.artRadio
def getSong(self):
song = False
try:
self.current_song.title
song = True
return song
        except AttributeError:
            # current_song is None until playback has started
            return song
def getSongs(self):
return self.songs_model
def getSongIndex(self):
return self.current_song.index
@property
def current_song(self):
if self.current_song_index is not None:
return self.songs_model[self.current_song_index][0]
def start_song(self, song_index):
songs_remaining = len(self.songs_model) - song_index
if songs_remaining <= 0:
# We don't have this song yet. Get a new playlist.
return self.get_playlist(start=True)
elif songs_remaining == 1:
# Preload next playlist so there's no delay
self.get_playlist()
prev = self.current_song
self.stop()
self.beaglebone.blueOff()
self.current_song_index = song_index
if prev:
self.update_song_row(prev)
if not self.current_song.is_still_valid():
self.current_song.message = "Playlist expired"
self.update_song_row()
return self.next_song()
if self.current_song.tired or self.current_song.rating == RATE_BAN:
return self.next_song()
self.buffer_percent = 100
def playSong():
self.player.set_property("uri", self.current_song.audioUrl)
self.play()
self.songChanged = False
self.beaglebone.blueOn()
self.playcount += 1
self.current_song.start_time = time.time()
#self.songs_treeview.scroll_to_cell(song_index, use_align=True, row_align=1.0)
#self.songs_treeview.set_cursor(song_index, None, 0)
self.radiologger.log("Radio - %s by %s" % (self.current_song.title, self.current_song.artist), "INFO")
self.loop.run()
def cb(v=None):
if self.loop.is_running():
self.loop.quit()
#self.loop = gobject.MainLoop()
self.song_worker_run(playSong, (), cb)
self.radiologger.log("Starting song: index: %i" % (song_index), "INFO")
#self.emit('song-changed', self.current_song)
def getSongTitle(self):
return self.current_song.title
def getSongArtist(self):
return self.current_song.artist
def getSongAlbum(self):
return self.current_song.album
def next_song(self, *ignore):
self.start_song(self.current_song_index + 1)
self.songChanged = True
def songChange(self):
return self.songChanged
def isPlaying(self):
return self.playing
def user_play(self, *ignore):
self.play()
def play(self):
if not self.playing:
self.playing = True
self.player.set_state(gst.STATE_PLAYING)
self.player.get_state(timeout=1)
self.update_song_row()
def user_pause(self, *ignore):
# todo: make blue light flash
self.pause()
def pause(self):
self.playing = False
self.player.set_state(gst.STATE_PAUSED)
self.update_song_row()
if self.loop.is_running():
self.loop.quit()
def stop(self):
prev = self.current_song
if prev and prev.start_time:
prev.finished = True
try:
prev.duration = self.player.query_duration(self.time_format, None)[0] / 1000000000
prev.position = self.player.query_position(self.time_format, None)[0] / 1000000000
except gst.QueryError:
prev.duration = prev.position = None
self.playing = False
self.player.set_state(gst.STATE_NULL)
if self.loop.is_running():
self.loop.quit()
def playpause(self, *ignore):
if self.playing:
self.pause()
else:
self.play()
def playpause_notify(self, *ignore):
if self.playing:
self.user_pause()
else:
self.user_play()
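    # get_playlist() fetches the next batch of songs from the current station on
    # the background worker; its callback appends them to songs_model, queues
    # album-art downloads on art_worker, and, when start_new_playlist is set,
    # starts playback at the first newly added row.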
def get_playlist(self, start=False):
self.beaglebone.redOff()
self.start_new_playlist = self.start_new_playlist or start
if self.waiting_for_playlist:
return
if self.gstreamer_errorcount_1 >= self.playcount and self.gstreamer_errorcount_2 >= 1:
self.radiologger.log("Too many gstreamer errors. Not retrying", "WARNING")
self.beaglebone.redOn()
self.waiting_for_playlist = 1
self.error_dialog(self.gstreamer_error, self.get_playlist)
return
def art_callback(t=None):
picContent, song, index = t
if index < len(self.songs_model) and self.songs_model[index][0] is song: # in case the playlist has been reset
self.radiologger.log("Downloaded album art for %i" % song.index, "INFO")
song.artRadio = picContent.encode('ascii', 'ignore')
self.songs_model[index][3] = picContent
self.update_song_row(song)
def callback(l):
start_index = len(self.songs_model)
for i in l:
self.beaglebone.greenOn()
i.index = len(self.songs_model)
self.songs_model.append([i, '', '', self.default_album_art])
self.update_song_row(i)
i.art_pixbuf = None
if i.artRadio:
self.art_worker.send(get_album_art, (i.artRadio, i, i.index), art_callback)
self.beaglebone.greenOff()
if self.start_new_playlist:
self.start_song(start_index)
self.gstreamer_errorcount_2 = self.gstreamer_errorcount_1
self.gstreamer_errorcount_1 = 0
self.playcount = 0
self.waiting_for_playlist = False
self.start_new_playlist = False
self.waiting_for_playlist = True
self.worker_run(self.current_station.get_playlist, (), callback, "Getting songs...")
def error_dialog(self, message, retry_cb, submsg=None):
self.beaglebone.redOn()
#dialog = self.builder.get_object("error_dialog")
#dialog.props.text = message
#dialog.props.secondary_text = submsg
#response = dialog.run()
#dialog.hide()
#if response == 2:
# self.gstreamer_errorcount_2 = 0
# logging.info("Manual retry")
# return retry_cb()
#elif response == 3:
# self.show_preferences()
def fatal_error_dialog(self, message, submsg):
        self.beaglebone.redOn()
dialog = self.builder.get_object("fatal_error_dialog")
dialog.props.text = message
dialog.props.secondary_text = submsg
response = dialog.run()
dialog.hide()
self.quit()
def api_update_dialog(self):
dialog = self.builder.get_object("api_update_dialog")
response = dialog.run()
if response:
openBrowser("http://kevinmehall.net/p/pithos/itbroke?utm_source=pithos&utm_medium=app&utm_campaign=%s" % VERSION)
self.quit()
def station_index(self, station):
return [i[0] for i in self.stations_model].index(station)
def station_changed(self, station, reconnecting=False):
# print station, type(station)
if station is self.current_station:
return
for availableStation in self.stations_model:
self.beaglebone.greenOn()
            try:
                if availableStation[0].id == station:
                    station = availableStation[0]
                    self.current_station_name = availableStation[1]
                    # print self.current_station_name
                    self.beaglebone.greenOff()
            except AttributeError:
                # the separator row stores None instead of a station object
                self.beaglebone.greenOff()
self.waiting_for_playlist = False
if not reconnecting:
self.stop()
self.beaglebone.blueOff()
self.current_song_index = None
self.songs_model = []
self.radiologger.log("Selecting station %s; total = %i" % (station.id, len(self.stations_model)), "INFO")
self.current_station_id = station.id
self.current_station = station
if not reconnecting:
self.get_playlist(start=True)
#self.stations_combo.set_active(self.station_index(station))
def on_gst_eos(self, bus, message):
if self.loop.is_running():
self.loop.quit()
self.radiologger.log("EOS", "INFO")
self.next_song()
def on_gst_error(self, bus, message):
err, debug = message.parse_error()
self.radiologger.log("Gstreamer error: %s, %s, %s" % (err, debug, err.code), "ERROR")
if self.current_song:
self.current_song.message = "Error: " + str(err)
        if err.code == int(gst.CORE_ERROR_MISSING_PLUGIN):
self.radiologger.log("Missing codec: GStreamer is missing a plugin", "ERROR")
return
self.gstreamer_error = str(err)
self.gstreamer_errorcount_1 += 1
self.next_song()
def on_gst_buffering(self, bus, message):
percent = message.parse_buffering()
self.buffer_percent = percent
if percent < 100:
self.player.set_state(gst.STATE_PAUSED)
elif self.playing:
self.player.set_state(gst.STATE_PLAYING)
self.update_song_row()
def set_volume_cb(self, volume):
# Convert to the cubic scale that the volume slider uses
scaled_volume = math.pow(volume, 1.0 / 3.0)
self.volume.handler_block_by_func(self.on_volume_change_event)
self.volume.set_property("value", scaled_volume)
self.volume.handler_unblock_by_func(self.on_volume_change_event)
self.preferences['volume'] = volume
def on_gst_volume(self, player, volumespec):
pass
#vol = self.player.get_property('volume')
#gobject.idle_add(self.set_volume_cb, vol)
def on_gst_source(self, player, params):
""" Setup httpsoupsrc to match Pithos proxy settings """
soup = player.props.source.props
proxy = self.get_proxy()
if proxy and hasattr(soup, 'proxy'):
scheme, user, password, hostport = parse_proxy(proxy)
soup.proxy = hostport
soup.proxy_id = user
soup.proxy_pw = password
def song_text(self, song):
title = cgi.escape(song.title)
artist = cgi.escape(song.artist)
album = cgi.escape(song.album)
msg = []
if song is self.current_song:
try:
dur_int = self.player.query_duration(self.time_format, None)[0]
dur_str = self.format_time(dur_int)
pos_int = self.player.query_position(self.time_format, None)[0]
pos_str = self.format_time(pos_int)
msg.append("%s / %s" % (pos_str, dur_str))
if not self.playing:
msg.append("Paused")
except gst.QueryError:
pass
if self.buffer_percent < 100:
msg.append("Buffering (%i%%)" % self.buffer_percent)
if song.message:
msg.append(song.message)
msg = " - ".join(msg)
if not msg:
msg = " "
return "<b><big>%s</big></b>\nby <b>%s</b>\n<small>from <i>%s</i></small>\n<small>%s</small>" % (title, artist, album, msg)
def song_icon(self, song):
pass
"""if song.tired:
return gtk.STOCK_JUMP_TO
if song.rating == RATE_LOVE:
return gtk.STOCK_ABOUT
if song.rating == RATE_BAN:
return gtk.STOCK_CANCEL"""
def update_song_row(self, song=None):
if song is None:
song = self.current_song
if song:
self.songs_model[song.index][1] = self.song_text(song)
self.songs_model[song.index][2] = self.song_icon(song)
return self.playing
def format_time(self, time_int):
time_int = time_int / 1000000000
s = time_int % 60
time_int /= 60
m = time_int % 60
time_int /= 60
h = time_int
if h:
return "%i:%02i:%02i" % (h, m, s)
else:
return "%i:%02i" % (m, s)
def selected_song(self):
sel = self.songs_treeview.get_selection().get_selected()
if sel:
return self.songs_treeview.get_model().get_value(sel[1], 0)
def love_song(self, song=None):
song = song or self.current_song
def callback(l):
self.update_song_row(song)
self.emit('song-rating-changed', song)
self.worker_run(song.rate, (RATE_LOVE,), callback, "Loving song...")
def ban_song(self, song=None):
song = song or self.current_song
def callback(l):
self.update_song_row(song)
self.emit('song-rating-changed', song)
self.worker_run(song.rate, (RATE_BAN,), callback, "Banning song...")
if song is self.current_song:
self.next_song()
def unrate_song(self, song=None):
song = song or self.current_song
def callback(l):
self.update_song_row(song)
self.emit('song-rating-changed', song)
self.worker_run(song.rate, (RATE_NONE,), callback, "Removing song rating...")
def tired_song(self, song=None):
song = song or self.current_song
def callback(l):
self.update_song_row(song)
self.emit('song-rating-changed', song)
self.worker_run(song.set_tired, (), callback, "Putting song on shelf...")
if song is self.current_song:
self.next_song()
def bookmark_song(self, song=None):
song = song or self.current_song
self.worker_run(song.bookmark, (), None, "Bookmarking...")
def bookmark_song_artist(self, song=None):
song = song or self.current_song
self.worker_run(song.bookmark_artist, (), None, "Bookmarking...")
def on_menuitem_love(self, widget):
self.love_song(self.selected_song())
def on_menuitem_ban(self, widget):
self.ban_song(self.selected_song())
def on_menuitem_unrate(self, widget):
self.unrate_song(self.selected_song())
def on_menuitem_tired(self, widget):
self.tired_song(self.selected_song())
def on_menuitem_info(self, widget):
song = self.selected_song()
openBrowser(song.songDetailURL)
def on_menuitem_bookmark_song(self, widget):
self.bookmark_song(self.selected_song())
def on_menuitem_bookmark_artist(self, widget):
self.bookmark_song_artist(self.selected_song())
def on_treeview_button_press_event(self, treeview, event):
x = int(event.x)
y = int(event.y)
thisTime = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
if event.button == 3:
rating = self.selected_song().rating
self.song_menu_love.set_property("visible", rating != RATE_LOVE)
self.song_menu_unlove.set_property("visible", rating == RATE_LOVE)
self.song_menu_ban.set_property("visible", rating != RATE_BAN)
self.song_menu_unban.set_property("visible", rating == RATE_BAN)
self.song_menu.popup(None, None, None, event.button, thisTime)
return True
if event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS:
self.radiologger.log("Double clicked on song %s", self.selected_song().index, "INFO")
if self.selected_song().index <= self.current_song_index:
return False
self.start_song(self.selected_song().index)
def on_volume_change_event(self, volumebutton, value):
# Use a cubic scale for volume. This matches what PulseAudio uses.
volume = math.pow(value, 3)
self.player.set_property("volume", volume)
self.preferences['volume'] = volume
def station_properties(self, *ignore):
openBrowser(self.current_station.info_url)
#def report_bug(self, *ignore):
# openBrowser("https://bugs.launchpad.net/pithos")
def stations_dialog(self, *ignore):
if self.stations_dlg:
self.stations_dlg.present()
else:
self.stations_dlg = StationsDialog.NewStationsDialog(self)
self.stations_dlg.show_all()
def refresh_stations(self, *ignore):
self.worker_run(self.pandora.get_stations, (), self.process_stations, "Refreshing stations...")
def on_destroy(self, widget, data=None):
"""on_destroy - called when the PithosWindow is close. """
self.stop()
self.beaglebone.blueOff()
self.preferences['last_station_id'] = self.current_station_id
self.prefs_dlg.save()
gtk.main_quit()
try:
import Adafruit_BBIO.ADC as adc
import Adafruit_BBIO.GPIO as gpio
beaglebone = True
except ImportError:
    beaglebone = False
import threading
from random import choice
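# Beaglebone reads two potentiometers through the BeagleBone ADC (volume on
# AIN5, station selection on AIN3) from a background thread and drives an RGB
# status LED (common, red, green, blue pins) over GPIO; when the Adafruit_BBIO
# import above failed, every GPIO/ADC call silently becomes a no-op.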
class Beaglebone(Pithos):
def __init__(self, pithos, radiologger, player):
self.volumePot = "AIN5"
self.stationPot = "AIN3"
self.radiologger = radiologger
self.player = player
self.common = "P8_10"
self.red = "P8_12"
self.green = "P8_14"
self.blue = "P8_16"
self.pithos = pithos
if beaglebone:
adc.setup()
self.radioPowerAndVolume()
gpio.setup(self.common, gpio.OUT)
gpio.setup(self.red, gpio.OUT)
gpio.setup(self.green, gpio.OUT)
gpio.setup(self.blue, gpio.OUT)
gpio.output(self.common, gpio.LOW)
gpio.output(self.red, gpio.LOW)
gpio.output(self.green, gpio.LOW)
gpio.output(self.blue, gpio.LOW)
def radioPowerAndVolume(self):
def getVolumeAndStationValue():
prevStation = 0
while True:
sample = 0
volReading = 0
statReading = 0
while sample < 10:
volReading += adc.read(self.volumePot)
time.sleep(0.01)
statReading += adc.read(self.stationPot)
sample += 1
time.sleep(0.05)
volReading = volReading / 10.0
statReading = statReading * 100
currStation = statReading
#print statReading, currStation
if currStation > prevStation + 2 or currStation < prevStation - 2:
#print prevStation, currStation
if self.pithos.have_stations:
stationIds = []
stations = self.pithos.getStations()
for myStation in stations:
try:
stationIds.append(myStation[0].id)
                            except AttributeError:
                                # probably just the separator row (None)
                                pass
newStation = choice(stationIds)
print newStation, type(newStation)
self.pithos.station_changed(newStation)
prevStation = currStation
volume = volReading
volString = "%.2f" % round(volume, 2)
previousVolume = self.player.get_property('volume')
prevVolString = "%.2f" % round(previousVolume, 2)
if volString != prevVolString:
# print previousVolume, volume
self.player.set_property('volume', volume)
thread = threading.Thread(target=getVolumeAndStationValue, args=())
thread.start()
def redOn(self):
if beaglebone:
gpio.output(self.red, gpio.HIGH)
def redOff(self):
if beaglebone:
gpio.output(self.red, gpio.LOW)
def greenOn(self):
if beaglebone:
gpio.output(self.green, gpio.HIGH)
def greenOff(self):
if beaglebone:
gpio.output(self.green, gpio.LOW)
def blueOn(self):
if beaglebone:
gpio.output(self.blue, gpio.HIGH)
def blueOff(self):
if beaglebone:
gpio.output(self.blue, gpio.LOW)
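# Example wiring (a minimal sketch, not part of the original file): the only
# thing Pithos requires of the 'radiologger' argument is a .log(message, level)
# method, so a throwaway logger such as the hypothetical one below is enough to
# bring the player up, assuming Pandora credentials are already stored in the
# radio.config preferences:
#
#   class SimpleLogger(object):
#       def log(self, message, level):
#           print "[%s] %s" % (level, message)
#
#   radio = Pithos(SimpleLogger())   # connects and starts loading stations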
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup code.
"""
import json
from xml.dom import minidom
import mock
from oslo_utils import timeutils
import webob
# needed for stubs to work
import cinder.backup
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.api import fakes
from cinder.tests import utils
# needed for stubs to work
import cinder.volume
LOG = logging.getLogger(__name__)
class BackupsAPITestCase(test.TestCase):
"""Test Case for backups API."""
def setUp(self):
super(BackupsAPITestCase, self).setUp()
self.volume_api = cinder.volume.API()
self.backup_api = cinder.backup.API()
self.context = context.get_admin_context()
self.context.project_id = 'fake'
self.context.user_id = 'fake'
@staticmethod
def _create_backup(volume_id=1,
display_name='test_backup',
display_description='this is a test backup',
container='volumebackups',
status='creating',
snapshot=False,
incremental=False,
parent_id=None,
size=0, object_count=0, host='testhost'):
"""Create a backup object."""
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = 'fake'
backup['project_id'] = 'fake'
backup['host'] = host
backup['availability_zone'] = 'az1'
backup['display_name'] = display_name
backup['display_description'] = display_description
backup['container'] = container
backup['status'] = status
backup['fail_reason'] = ''
backup['size'] = size
backup['object_count'] = object_count
backup['snapshot'] = snapshot
backup['incremental'] = incremental
backup['parent_id'] = parent_id
return db.backup_create(context.get_admin_context(), backup)['id']
@staticmethod
def _get_backup_attrib(backup_id, attrib_name):
return db.backup_get(context.get_admin_context(),
backup_id)[attrib_name]
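    # The two helpers above are used throughout these tests: _create_backup()
    # writes a backup row directly into the DB (keyword arguments override the
    # defaults, e.g. self._create_backup(volume_id, status='available')), and
    # _get_backup_attrib() reads a single column back for assertions, e.g.
    # self._get_backup_attrib(backup_id, 'status').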
def test_show_backup(self):
volume_id = utils.create_volume(self.context, size=5,
status='creating')['id']
backup_id = self._create_backup(volume_id)
LOG.debug('Created backup with id %s' % backup_id)
req = webob.Request.blank('/v2/fake/backups/%s' %
backup_id)
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['backup']['availability_zone'], 'az1')
self.assertEqual(res_dict['backup']['container'], 'volumebackups')
self.assertEqual(res_dict['backup']['description'],
'this is a test backup')
self.assertEqual(res_dict['backup']['name'], 'test_backup')
self.assertEqual(res_dict['backup']['id'], backup_id)
self.assertEqual(res_dict['backup']['object_count'], 0)
self.assertEqual(res_dict['backup']['size'], 0)
self.assertEqual(res_dict['backup']['status'], 'creating')
self.assertEqual(res_dict['backup']['volume_id'], volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
def test_show_backup_xml_content_type(self):
volume_id = utils.create_volume(self.context, size=5,
status='creating')['id']
backup_id = self._create_backup(volume_id)
req = webob.Request.blank('/v2/fake/backups/%s' % backup_id)
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
backup = dom.getElementsByTagName('backup')
name = backup.item(0).getAttribute('name')
container_name = backup.item(0).getAttribute('container')
self.assertEqual(container_name.strip(), "volumebackups")
self.assertEqual(name.strip(), "test_backup")
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
def test_show_backup_with_backup_NotFound(self):
req = webob.Request.blank('/v2/fake/backups/9999')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Backup 9999 could not be found.')
def test_list_backups_json(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['backups'][0]), 3)
self.assertEqual(res_dict['backups'][0]['id'], backup_id1)
self.assertEqual(res_dict['backups'][0]['name'], 'test_backup')
self.assertEqual(len(res_dict['backups'][1]), 3)
self.assertEqual(res_dict['backups'][1]['id'], backup_id2)
self.assertEqual(res_dict['backups'][1]['name'], 'test_backup')
self.assertEqual(len(res_dict['backups'][2]), 3)
self.assertEqual(res_dict['backups'][2]['id'], backup_id3)
self.assertEqual(res_dict['backups'][2]['name'], 'test_backup')
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_xml(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
backup_list = dom.getElementsByTagName('backup')
self.assertEqual(backup_list.item(0).attributes.length, 2)
self.assertEqual(backup_list.item(0).getAttribute('id'),
backup_id1)
self.assertEqual(backup_list.item(1).attributes.length, 2)
self.assertEqual(backup_list.item(1).getAttribute('id'),
backup_id2)
self.assertEqual(backup_list.item(2).attributes.length, 2)
self.assertEqual(backup_list.item(2).getAttribute('id'),
backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_detail_json(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['backups'][0]), 12)
self.assertEqual(res_dict['backups'][0]['availability_zone'], 'az1')
self.assertEqual(res_dict['backups'][0]['container'],
'volumebackups')
self.assertEqual(res_dict['backups'][0]['description'],
'this is a test backup')
self.assertEqual(res_dict['backups'][0]['name'],
'test_backup')
self.assertEqual(res_dict['backups'][0]['id'], backup_id1)
self.assertEqual(res_dict['backups'][0]['object_count'], 0)
self.assertEqual(res_dict['backups'][0]['size'], 0)
self.assertEqual(res_dict['backups'][0]['status'], 'creating')
self.assertEqual(res_dict['backups'][0]['volume_id'], '1')
self.assertEqual(len(res_dict['backups'][1]), 12)
self.assertEqual(res_dict['backups'][1]['availability_zone'], 'az1')
self.assertEqual(res_dict['backups'][1]['container'],
'volumebackups')
self.assertEqual(res_dict['backups'][1]['description'],
'this is a test backup')
self.assertEqual(res_dict['backups'][1]['name'],
'test_backup')
self.assertEqual(res_dict['backups'][1]['id'], backup_id2)
self.assertEqual(res_dict['backups'][1]['object_count'], 0)
self.assertEqual(res_dict['backups'][1]['size'], 0)
self.assertEqual(res_dict['backups'][1]['status'], 'creating')
self.assertEqual(res_dict['backups'][1]['volume_id'], '1')
self.assertEqual(len(res_dict['backups'][2]), 12)
self.assertEqual(res_dict['backups'][2]['availability_zone'], 'az1')
self.assertEqual(res_dict['backups'][2]['container'],
'volumebackups')
self.assertEqual(res_dict['backups'][2]['description'],
'this is a test backup')
self.assertEqual(res_dict['backups'][2]['name'],
'test_backup')
self.assertEqual(res_dict['backups'][2]['id'], backup_id3)
self.assertEqual(res_dict['backups'][2]['object_count'], 0)
self.assertEqual(res_dict['backups'][2]['size'], 0)
self.assertEqual(res_dict['backups'][2]['status'], 'creating')
self.assertEqual(res_dict['backups'][2]['volume_id'], '1')
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_detail_using_filters(self):
backup_id1 = self._create_backup(display_name='test2')
backup_id2 = self._create_backup(status='available')
backup_id3 = self._create_backup(volume_id=4321)
req = webob.Request.blank('/v2/fake/backups/detail?name=test2')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(len(res_dict['backups']), 1)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['backups'][0]['id'], backup_id1)
req = webob.Request.blank('/v2/fake/backups/detail?status=available')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(len(res_dict['backups']), 1)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['backups'][0]['id'], backup_id2)
req = webob.Request.blank('/v2/fake/backups/detail?volume_id=4321')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(len(res_dict['backups']), 1)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['backups'][0]['id'], backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_detail_xml(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
backup_detail = dom.getElementsByTagName('backup')
self.assertEqual(backup_detail.item(0).attributes.length, 11)
self.assertEqual(
backup_detail.item(0).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(0).getAttribute('container'), 'volumebackups')
self.assertEqual(
backup_detail.item(0).getAttribute('description'),
'this is a test backup')
self.assertEqual(
backup_detail.item(0).getAttribute('name'), 'test_backup')
self.assertEqual(
backup_detail.item(0).getAttribute('id'), backup_id1)
self.assertEqual(
int(backup_detail.item(0).getAttribute('object_count')), 0)
self.assertEqual(
int(backup_detail.item(0).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(0).getAttribute('status'), 'creating')
self.assertEqual(
int(backup_detail.item(0).getAttribute('volume_id')), 1)
self.assertEqual(backup_detail.item(1).attributes.length, 11)
self.assertEqual(
backup_detail.item(1).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(1).getAttribute('container'), 'volumebackups')
self.assertEqual(
backup_detail.item(1).getAttribute('description'),
'this is a test backup')
self.assertEqual(
backup_detail.item(1).getAttribute('name'), 'test_backup')
self.assertEqual(
backup_detail.item(1).getAttribute('id'), backup_id2)
self.assertEqual(
int(backup_detail.item(1).getAttribute('object_count')), 0)
self.assertEqual(
int(backup_detail.item(1).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(1).getAttribute('status'), 'creating')
self.assertEqual(
int(backup_detail.item(1).getAttribute('volume_id')), 1)
self.assertEqual(backup_detail.item(2).attributes.length, 11)
self.assertEqual(
backup_detail.item(2).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(2).getAttribute('container'), 'volumebackups')
self.assertEqual(
backup_detail.item(2).getAttribute('description'),
'this is a test backup')
self.assertEqual(
backup_detail.item(2).getAttribute('name'), 'test_backup')
self.assertEqual(
backup_detail.item(2).getAttribute('id'), backup_id3)
self.assertEqual(
int(backup_detail.item(2).getAttribute('object_count')), 0)
self.assertEqual(
int(backup_detail.item(2).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(2).getAttribute('status'), 'creating')
self.assertEqual(
int(backup_detail.item(2).getAttribute('volume_id')), 1)
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_json(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5)['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(res.status_int, 202)
self.assertIn('id', res_dict['backup'])
self.assertTrue(_mock_service_get_all_by_topic.called)
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_snapshot_json(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5,
status='available')['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(res.status_int, 202)
self.assertIn('id', res_dict['backup'])
self.assertTrue(_mock_service_get_all_by_topic.called)
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_xml(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=2)['id']
req = webob.Request.blank('/v2/fake/backups')
req.body = ('<backup display_name="backup-001" '
'display_description="Nightly Backup" '
'volume_id="%s" container="Container001"/>' % volume_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
dom = minidom.parseString(res.body)
backup = dom.getElementsByTagName('backup')
self.assertTrue(backup.item(0).hasAttribute('id'))
self.assertTrue(_mock_service_get_all_by_topic.called)
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_delta(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5)['id']
backup_id = self._create_backup(volume_id, status="available")
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
"incremental": True,
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['backup'])
self.assertTrue(_mock_service_get_all_by_topic.called)
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_incremental_backup_invalid_status(
self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5)['id']
backup_id = self._create_backup(volume_id)
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
"incremental": True,
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual('Invalid backup: The parent backup must be '
'available for incremental backup.',
res_dict['badRequest']['message'])
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_backup_with_no_body(self):
# omit body from the request
req = webob.Request.blank('/v2/fake/backups')
req.body = json.dumps(None)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'The server could not comply with the request since'
' it is either malformed or otherwise incorrect.')
def test_create_backup_with_body_KeyError(self):
# omit volume_id from body
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format')
def test_create_backup_with_VolumeNotFound(self):
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": 9999,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Volume 9999 could not be found.')
def test_create_backup_with_InvalidVolume(self):
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5,
status='restoring')['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid volume: Volume to be backed up must'
' be available')
def test_create_backup_with_InvalidVolume2(self):
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5,
status='in-use')['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid volume: Volume to be backed up must'
' be available')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_WithOUT_enabled_backup_service(
self,
_mock_service_get_all_by_topic):
        # no enabled backup service is available
_mock_service_get_all_by_topic.return_value = []
volume_id = utils.create_volume(self.context, size=2)['id']
req = webob.Request.blank('/v2/fake/backups')
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 500)
self.assertEqual(res_dict['computeFault']['code'], 500)
self.assertEqual(res_dict['computeFault']['message'],
'Service cinder-backup could not be found.')
volume = self.volume_api.get(context.get_admin_context(), volume_id)
self.assertEqual(volume['status'], 'available')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_incremental_backup_invalid_no_full(
self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5,
status='available')['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
"incremental": True,
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual('Invalid backup: No backups available to do '
'an incremental backup.',
res_dict['badRequest']['message'])
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_is_backup_service_enabled(self, _mock_service_get_all_by_topic):
test_host = 'test_host'
alt_host = 'strange_host'
empty_service = []
        # service host does not match the volume's host
host_not_match = [{'availability_zone': "fake_az", 'host': alt_host,
'disabled': 0, 'updated_at': timeutils.utcnow()}]
        # service availability zone does not match the volume's az
az_not_match = [{'availability_zone': "strange_az", 'host': test_host,
'disabled': 0, 'updated_at': timeutils.utcnow()}]
# service disabled
disabled_service = []
        # dead service whose last report was in the 20th century
dead_service = [{'availability_zone': "fake_az", 'host': alt_host,
'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}]
        # first service's host does not match, but the second one does
multi_services = [{'availability_zone': "fake_az", 'host': alt_host,
'disabled': 0, 'updated_at': timeutils.utcnow()},
{'availability_zone': "fake_az", 'host': test_host,
'disabled': 0, 'updated_at': timeutils.utcnow()}]
# Setup mock to run through the following service cases
_mock_service_get_all_by_topic.side_effect = [empty_service,
host_not_match,
az_not_match,
disabled_service,
dead_service,
multi_services]
volume_id = utils.create_volume(self.context, size=2,
host=test_host)['id']
volume = self.volume_api.get(context.get_admin_context(), volume_id)
# test empty service
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test host not match service
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test az not match service
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test disabled service
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test dead service
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
False)
# test multi services and the last service matches
self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
test_host),
True)
def test_delete_backup_available(self):
backup_id = self._create_backup(status='available')
req = webob.Request.blank('/v2/fake/backups/%s' %
backup_id)
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(self._get_backup_attrib(backup_id, 'status'),
'deleting')
db.backup_destroy(context.get_admin_context(), backup_id)
def test_delete_delta_backup(self):
backup_id = self._create_backup(status='available')
delta_id = self._create_backup(status='available',
incremental=True)
req = webob.Request.blank('/v2/fake/backups/%s' %
delta_id)
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(202, res.status_int)
self.assertEqual('deleting',
self._get_backup_attrib(delta_id, 'status'))
db.backup_destroy(context.get_admin_context(), delta_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_delete_backup_error(self):
backup_id = self._create_backup(status='error')
req = webob.Request.blank('/v2/fake/backups/%s' %
backup_id)
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
self.assertEqual(self._get_backup_attrib(backup_id, 'status'),
'deleting')
db.backup_destroy(context.get_admin_context(), backup_id)
def test_delete_backup_with_backup_NotFound(self):
req = webob.Request.blank('/v2/fake/backups/9999')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Backup 9999 could not be found.')
def test_delete_backup_with_InvalidBackup(self):
backup_id = self._create_backup()
req = webob.Request.blank('/v2/fake/backups/%s' %
backup_id)
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid backup: Backup status must be '
'available or error')
db.backup_destroy(context.get_admin_context(), backup_id)
def test_delete_backup_with_InvalidBackup2(self):
volume_id = utils.create_volume(self.context, size=5)['id']
backup_id = self._create_backup(volume_id, status="available")
delta_backup_id = self._create_backup(status='available',
incremental=True,
parent_id=backup_id)
req = webob.Request.blank('/v2/fake/backups/%s' %
backup_id)
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual('Invalid backup: Incremental backups '
'exist for this backup.',
res_dict['badRequest']['message'])
db.backup_destroy(context.get_admin_context(), delta_backup_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_volume_id_specified_json(self):
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
self.assertEqual(res_dict['restore']['volume_id'], volume_id)
def test_restore_backup_volume_id_specified_xml(self):
backup_id = self._create_backup(status='available')
volume_id = utils.create_volume(self.context, size=2)['id']
req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
req.body = '<restore volume_id="%s"/>' % volume_id
req.method = 'POST'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
dom = minidom.parseString(res.body)
restore = dom.getElementsByTagName('restore')
self.assertEqual(restore.item(0).getAttribute('backup_id'),
backup_id)
self.assertEqual(restore.item(0).getAttribute('volume_id'), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
def test_restore_backup_with_no_body(self):
# omit body from the request
backup_id = self._create_backup(status='available')
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.body = json.dumps(None)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format')
db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_with_body_KeyError(self):
# omit restore from body
backup_id = self._create_backup(status='available')
req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
body = {"": {}}
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format')
@mock.patch('cinder.volume.API.create')
def test_restore_backup_volume_id_unspecified(self,
_mock_volume_api_create):
# intercept volume creation to ensure created volume
# has status of available
def fake_volume_api_create(context, size, name, description):
volume_id = utils.create_volume(self.context, size=size)['id']
return db.volume_get(context, volume_id)
_mock_volume_api_create.side_effect = fake_volume_api_create
backup_id = self._create_backup(size=5, status='available')
body = {"restore": {}}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
@mock.patch('cinder.backup.API.restore')
def test_restore_backup_with_InvalidInput(self,
_mock_volume_api_restore):
msg = _("Invalid input")
_mock_volume_api_restore.side_effect = \
exception.InvalidInput(reason=msg)
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=0)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid input received: Invalid input')
def test_restore_backup_with_InvalidVolume(self):
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5,
status='attaching')['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid volume: Volume to be restored to must '
'be available')
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_with_InvalidBackup(self):
backup_id = self._create_backup(status='restoring')
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid backup: Backup status must be available')
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_with_BackupNotFound(self):
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/9999/restore')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Backup 9999 could not be found.')
db.volume_destroy(context.get_admin_context(), volume_id)
def test_restore_backup_with_VolumeNotFound(self):
backup_id = self._create_backup(status='available')
body = {"restore": {"volume_id": "9999", }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Volume 9999 could not be found.')
db.backup_destroy(context.get_admin_context(), backup_id)
@mock.patch('cinder.backup.API.restore')
def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(
self,
_mock_backup_restore):
_mock_backup_restore.side_effect = \
exception.VolumeSizeExceedsAvailableQuota(requested='2',
consumed='2',
quota='3')
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 413)
self.assertEqual(res_dict['overLimit']['code'], 413)
self.assertEqual(res_dict['overLimit']['message'],
'Requested volume or snapshot exceeds allowed '
'gigabytes quota. Requested 2G, quota is 3G and '
'2G has been consumed.')
@mock.patch('cinder.backup.API.restore')
def test_restore_backup_with_VolumeLimitExceeded(self,
_mock_backup_restore):
_mock_backup_restore.side_effect = \
exception.VolumeLimitExceeded(allowed=1)
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 413)
self.assertEqual(res_dict['overLimit']['code'], 413)
self.assertEqual(res_dict['overLimit']['message'],
'Maximum number of volumes allowed (1) exceeded')
def test_restore_backup_to_undersized_volume(self):
backup_size = 10
backup_id = self._create_backup(status='available', size=backup_size)
# need to create the volume referenced below first
volume_size = 5
volume_id = utils.create_volume(self.context, size=volume_size)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid volume: volume size %d is too '
'small to restore backup of size %d.'
% (volume_size, backup_size))
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_to_oversized_volume(self):
backup_id = self._create_backup(status='available', size=10)
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=15)['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
self.assertEqual(res_dict['restore']['volume_id'], volume_id)
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
@mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup')
def test_restore_backup_with_different_host(self, mock_restore_backup):
backup_id = self._create_backup(status='available', size=10,
host='HostA@BackendB#PoolA')
volume_id = utils.create_volume(self.context, size=10,
host='HostB@BackendB#PoolB')['id']
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
self.assertEqual(res_dict['restore']['volume_id'], volume_id)
mock_restore_backup.assert_called_once_with(mock.ANY,
'HostB',
backup_id,
volume_id)
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_export_record_as_non_admin(self):
backup_id = self._create_backup(status='available', size=10)
req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
backup_id)
req.method = 'GET'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
# request is not authorized
self.assertEqual(res.status_int, 403)
@mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
def test_export_backup_record_id_specified_json(self,
_mock_export_record_rpc):
backup_id = self._create_backup(status='available', size=10)
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_service = 'fake'
backup_url = 'fake'
_mock_export_record_rpc.return_value = \
{'backup_service': backup_service,
'backup_url': backup_url}
req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
backup_id)
req.method = 'GET'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
# verify that request is successful
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['backup-record']['backup_service'],
backup_service)
self.assertEqual(res_dict['backup-record']['backup_url'],
backup_url)
db.backup_destroy(context.get_admin_context(), backup_id)
@mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
def test_export_record_backup_id_specified_xml(self,
_mock_export_record_rpc):
backup_id = self._create_backup(status='available', size=10)
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_service = 'fake'
backup_url = 'fake'
_mock_export_record_rpc.return_value = \
{'backup_service': backup_service,
'backup_url': backup_url}
req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
backup_id)
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
export = dom.getElementsByTagName('backup-record')
self.assertEqual(export.item(0).getAttribute('backup_service'),
backup_service)
self.assertEqual(export.item(0).getAttribute('backup_url'),
backup_url)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_export_record_with_bad_backup_id(self):
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_id = 'bad_id'
req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
backup_id)
req.method = 'GET'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Backup %s could not be found.' % backup_id)
def test_export_record_for_unavailable_backup(self):
backup_id = self._create_backup(status='restoring')
ctx = context.RequestContext('admin', 'fake', is_admin=True)
req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
backup_id)
req.method = 'GET'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid backup: Backup status must be available '
'and not restoring.')
db.backup_destroy(context.get_admin_context(), backup_id)
@mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
def test_export_record_with_unavailable_service(self,
_mock_export_record_rpc):
msg = 'fake unavailable service'
_mock_export_record_rpc.side_effect = \
exception.InvalidBackup(reason=msg)
backup_id = self._create_backup(status='available')
ctx = context.RequestContext('admin', 'fake', is_admin=True)
req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
backup_id)
req.method = 'GET'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid backup: %s' % msg)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_import_record_as_non_admin(self):
backup_service = 'fake'
backup_url = 'fake'
req = webob.Request.blank('/v2/fake/backups/import_record')
body = {'backup-record': {'backup_service': backup_service,
'backup_url': backup_url}}
req.body = json.dumps(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
# request is not authorized
self.assertEqual(res.status_int, 403)
@mock.patch('cinder.backup.api.API._list_backup_services')
@mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
def test_import_record_volume_id_specified_json(self,
_mock_import_record_rpc,
_mock_list_services):
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_service = 'fake'
backup_url = 'fake'
_mock_import_record_rpc.return_value = \
{'display_name': 'fake',
'display_description': 'fake',
'container': 'fake',
'size': 1,
'service_metadata': 'fake',
'service': 'fake',
'object_count': 1,
'status': 'available',
'availability_zone': 'fake'}
_mock_list_services.return_value = ['fake']
req = webob.Request.blank('/v2/fake/backups/import_record')
body = {'backup-record': {'backup_service': backup_service,
'backup_url': backup_url}}
req.body = json.dumps(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
# verify that request is successful
self.assertEqual(res.status_int, 201)
self.assertIn('id', res_dict['backup'])
@mock.patch('cinder.backup.api.API._list_backup_services')
@mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
def test_import_record_volume_id_specified_xml(self,
_mock_import_record_rpc,
_mock_list_services):
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_service = 'fake'
backup_url = 'fake'
_mock_import_record_rpc.return_value = \
{'display_name': 'fake',
'display_description': 'fake',
'container': 'fake',
'size': 1,
'service_metadata': 'fake',
'service': 'fake',
'object_count': 1,
'status': 'available',
'availability_zone': 'fake'}
_mock_list_services.return_value = ['fake']
req = webob.Request.blank('/v2/fake/backups/import_record')
req.body = ('<backup-record backup_service="%(backup_service)s" '
'backup_url="%(backup_url)s"/>') \
% {'backup_url': backup_url,
'backup_service': backup_service}
req.method = 'POST'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
self.assertEqual(res.status_int, 201)
dom = minidom.parseString(res.body)
backup = dom.getElementsByTagName('backup')
self.assertTrue(backup.item(0).hasAttribute('id'))
@mock.patch('cinder.backup.api.API._list_backup_services')
def test_import_record_with_no_backup_services(self,
_mock_list_services):
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_service = 'fake'
backup_url = 'fake'
_mock_list_services.return_value = []
req = webob.Request.blank('/v2/fake/backups/import_record')
body = {'backup-record': {'backup_service': backup_service,
'backup_url': backup_url}}
req.body = json.dumps(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 500)
self.assertEqual(res_dict['computeFault']['code'], 500)
self.assertEqual(res_dict['computeFault']['message'],
'Service %s could not be found.'
% backup_service)
@mock.patch('cinder.backup.api.API._list_backup_services')
@mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
def test_import_backup_with_missing_backup_services(self,
_mock_import_record,
_mock_list_services):
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_service = 'fake'
backup_url = 'fake'
_mock_list_services.return_value = ['no-match1', 'no-match2']
_mock_import_record.side_effect = \
exception.ServiceNotFound(service_id='fake')
req = webob.Request.blank('/v2/fake/backups/import_record')
body = {'backup-record': {'backup_service': backup_service,
'backup_url': backup_url}}
req.body = json.dumps(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 500)
self.assertEqual(res_dict['computeFault']['code'], 500)
self.assertEqual(res_dict['computeFault']['message'],
'Service %s could not be found.'
% backup_service)
def test_import_record_with_missing_body_elements(self):
ctx = context.RequestContext('admin', 'fake', is_admin=True)
backup_service = 'fake'
backup_url = 'fake'
# test with no backup_service
req = webob.Request.blank('/v2/fake/backups/import_record')
body = {'backup-record': {'backup_url': backup_url}}
req.body = json.dumps(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format.')
# test with no backup_url
req = webob.Request.blank('/v2/fake/backups/import_record')
body = {'backup-record': {'backup_service': backup_service}}
req.body = json.dumps(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format.')
# test with no backup_service and no backup_url
req = webob.Request.blank('/v2/fake/backups/import_record')
body = {'backup-record': {}}
req.body = json.dumps(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format.')
def test_import_record_with_no_body(self):
ctx = context.RequestContext('admin', 'fake', is_admin=True)
req = webob.Request.blank('/v2/fake/backups/import_record')
req.body = json.dumps(None)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = json.loads(res.body)
# verify that the request fails with a bad request error
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format.')
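# Illustrative sketch (not part of the original suite): the tests above repeat
# the same webob request boilerplate. A hypothetical helper such as the one
# below could issue a JSON request against the fake WSGI app and return the
# status code plus the decoded body; the name `_json_request` and its signature
# are assumptions for illustration only.
def _json_request(path, method='GET', body=None, ctx=None):
    """Send a JSON request to the fake WSGI app and decode the response."""
    req = webob.Request.blank(path)
    req.method = method
    req.headers['Content-Type'] = 'application/json'
    if body is not None:
        req.body = json.dumps(body)
    app = fakes.wsgi_app(fake_auth_context=ctx) if ctx else fakes.wsgi_app()
    res = req.get_response(app)
    decoded = json.loads(res.body) if res.body else None
    return res.status_int, decoded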
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Tobias Weber <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import stat
import random
import string
import logging
from json import dumps, loads
from io import BytesIO
try:
from urllib.error import HTTPError # python 3
from urllib.parse import urlencode
except ImportError:
from urllib2 import HTTPError # python 2
from urllib import urlencode
try:
import Cookie # python 2
import cookielib as cookiejar
except ImportError:
import http.cookies as Cookie # python 3
from http import cookiejar
if sys.version_info[0] == 3:
STR = str
else:
STR = unicode
from levitas.middleware import Middleware
from tests import test
from .test import BaseTest
from .utf8_chars import UTF8_CHARS
log = logging.getLogger("levitas.tests.middlewareTest")
SETTINGS = \
"""
from tests.middlewareTest import (Test_none_result,
Test_empty_result,
Test_empty_result_list,
Test_response_string_py2,
Test_response_string_py3,
Test_response_bytes,
Test_charset,
Test_invalid_result,
Test_addHeader,
Test_response_error,
Test_response_redirect,
Test_redirected,
Test_response_file,
Test_get_browser_language,
Test_get_cookie,
Test_set_cookie,
Test_clear_cookie,
Test_set_signed_cookie,
Test_get_signed_cookie,
Test_get_args,
Test_post_args,
Test_post_fileupload)
from levitas.lib.levitasFieldStorage import LevitasFieldStorage
cookie_secret = "mysecret"
fieldstorage_class = LevitasFieldStorage
upload_path = "/tmp"
urls = [
(r"^/none_result", Test_none_result),
(r"^/empty_result", Test_empty_result),
(r"^/empty_result_list", Test_empty_result_list),
(r"^/response_string_py2", Test_response_string_py2),
(r"^/response_string_py3", Test_response_string_py3),
(r"^/response_bytes", Test_response_bytes),
(r"^/charset", Test_charset),
(r"^/invalid_result", Test_invalid_result),
(r"^/addHeader", Test_addHeader),
(r"^/responseError", Test_response_error),
(r"^/redirect", Test_response_redirect),
(r"^/redirected", Test_redirected),
(r"^/response_file", Test_response_file),
(r"^/get_browser_language", Test_get_browser_language),
(r"^/get_cookie", Test_get_cookie),
(r"^/set_cookie", Test_set_cookie),
(r"^/clear_cookie", Test_clear_cookie),
(r"^/set_signed_cookie", Test_set_signed_cookie),
(r"^/get_signed_cookie", Test_get_signed_cookie),
(r"^/get_args", Test_get_args),
(r"^/post_args", Test_post_args),
(r"^/post_fileupload", Test_post_fileupload),
]
"""
class Test_none_result(Middleware):
def get(self):
return None
class Test_empty_result(Middleware):
def get(self):
return ""
class Test_empty_result_list(Middleware):
def get(self):
return []
class Test_charset(Middleware):
def get(self):
return UTF8_CHARS
class Test_invalid_result(Middleware):
def get(self):
class NotIterObj(object):
pass
return NotIterObj()
class Test_response_string_py2(Middleware):
def get(self):
if sys.version_info[0] == 3:
return b"test string"
else:
return "test string"
class Test_response_string_py3(Middleware):
def get(self):
if sys.version_info[0] == 3:
return "test string"
else:
return u"test string"
class Test_response_bytes(Middleware):
def get(self):
return b"test string"
class Test_response_encoded_string(Middleware):
def get(self):
if sys.version_info[0] == 3:
return "test string".encode(self._encoding)
else:
return u"test string".encode(self._encoding)
class Test_addHeader(Middleware):
def get(self):
self.addHeader("Test-Header", "Test-Value")
return
class Test_response_error(Middleware):
def get(self):
return self.responseError(600, "Test-Error")
class Test_response_file(Middleware):
def get(self):
f = "tests/files/testfile.png"
s = os.stat(f)[stat.ST_SIZE]
self.addHeader("Content-type", "image/png")
self.addHeader("Content-Length", str(s))
return open(f, "rb")
class Test_response_redirect(Middleware):
def get(self):
return self.redirect("/redirected")
class Test_redirected(Middleware):
def get(self):
return u"redirected"
class Test_get_browser_language(Middleware):
def get(self):
return u",".join(self.get_browser_language())
class Test_get_cookie(Middleware):
def get(self):
return self.get_cookie("testcookie", "get cookie error")
class Test_set_cookie(Middleware):
def get(self):
self.set_cookie("testcookie", "testvalue", expires_days=14, httponly=True)
return
class Test_clear_cookie(Middleware):
def get(self):
self.clear_cookie("testcookie")
return
class Test_set_signed_cookie(Middleware):
def get(self):
self.set_signed_cookie("testcookie", "testvalue", expires_days=14, httponly=True)
return
class Test_get_signed_cookie(Middleware):
def get(self):
return self.get_signed_cookie("testcookie")
class Test_get_args(Middleware):
def get(self):
return dumps(self.request_data)
class Test_post_args(Middleware):
def post(self):
return dumps(self.request_data)
class Test_post_fileupload(Middleware):
def post(self):
result = "OK"
data = self.request_data
# Get all arguments and files from request_data
args = {}
files = {}
for k in data.keys():
d = data[k]
if d.filename is None:
args[k] = d.value
else:
files[d.name] = (d.filename, d.file)
#print(args)
#print(files)
if (not "arg1" in args) or (not "arg2" in args):
result = "Arguments are incorrect"
elif (not "file1" in files) or (not "file2" in files):
result = "Fileuploads are incorrect"
elif not os.path.exists("/tmp/dummy.txt"):
result = "Upload dummy.txt failed"
# Close all files
for k, v in files.items():
v[1].close()
# Remove uploaded file
try:
os.remove("/tmp/dummy.txt")
except OSError:
pass
return result
class MiddlewareTest(BaseTest):
def test_handler_404(self):
"""Test resource not found - code 404"""
obj = self._request("some_path")
self.assertTrue(isinstance(obj, HTTPError) and \
obj.code == 404, type(obj))
def test_none_result(self):
"""Test None is returned"""
obj = self._request("none_result")
#info = obj.info()
#code = obj.code
data = obj.read()
self.assertTrue(data == b"", data)
def test_empty_result(self):
"""Test empty string is returned"""
obj = self._request("empty_result")
data = obj.read()
self.assertTrue(data == b"", data)
def test_empty_result_list(self):
"""Test empty list is returned"""
obj = self._request("empty_result_list")
data = obj.read()
self.assertTrue(data == b"", data)
def test_response_string_py2(self):
"""Test python 2 str type is returned"""
obj = self._request("response_string_py2")
self.assertTrue(obj.code == 200, type(obj))
def test_response_string_py3(self):
"""Test python 3 str is returned"""
obj = self._request("response_string_py3")
self.assertTrue(obj.code == 200, type(obj))
def test_response_bytes(self):
"""Test bytes type is returned"""
obj = self._request("response_bytes")
self.assertTrue(obj.code == 200, type(obj))
def test_charset(self):
"""Test utf8 charset"""
obj = self._request("charset")
data = obj.read()
s = data.decode("utf-8")
self.assertTrue(s == UTF8_CHARS, s)
def test_invalid_result(self):
"""Test internal server error is catched"""
obj = self._request("invalid_result")
self.assertTrue(isinstance(obj, HTTPError) and \
obj.code == 500, type(obj))
def test_addHeader(self):
"""Test headers are responsed"""
obj = self._request("addHeader")
headers = obj.headers
self.assertTrue(headers["Test-Header"] == "Test-Value", str(obj))
def test_response_error(self):
"""Test response error"""
obj = self._request("responseError")
self.assertTrue(obj.code == 600, str(obj))
def test_response_file(self):
"""Test response binary file"""
obj = self._request("response_file")
headers = obj.headers
f = "tests/files/testfile.png"
s = os.stat(f)[stat.ST_SIZE]
self.assertTrue(headers["Content-type"] == "image/png", str(obj))
self.assertTrue(headers["Content-Length"] == str(s), str(obj))
def test_response_redirect(self):
"""Test redirect"""
obj = self._request("redirect")
self.assertTrue(obj.read() == b"redirected", str(obj))
def test_get_browser_language(self):
"""Test browser language is available"""
lang_header = "de-de,de;q=0.8,en-us;q=0.5,en;q=0.3"
self.headers["ACCEPT_LANGUAGE"] = lang_header
obj = self._request("get_browser_language")
self.assertTrue(obj.read() == b"de-de,de,en-us,en", str(obj))
def test_get_cookie(self):
"""Test get a cookie"""
self.headers["Cookie"] = "testcookie=testvalue"
obj = self._request("get_cookie")
data = obj.read()
self.assertTrue(data == b"testvalue", data)
def test_set_cookie(self):
"""Test set a cookie"""
obj = self._request("set_cookie")
info = obj.info()
cookies = Cookie.BaseCookie()
cookies.load(info["Set-Cookie"])
self.assertTrue("testcookie" in cookies,
"'testcookie' in cookies")
cookie = cookies["testcookie"]
self.assertTrue(cookie["path"] == "/", cookie["path"])
t = cookiejar.http2time(cookie["expires"])
self.assertTrue(t >= time.time(),
"expires is smaller then current time")
self.assertTrue("httponly" in cookie,
"'httponly' in cookie")
def test_clear_cookie(self):
"""Test delete a cookie"""
self.headers["Cookie"] = "testcookie=testvalue"
obj = self._request("clear_cookie")
info = obj.info()
cookies = Cookie.BaseCookie()
cookies.load(info["Set-Cookie"])
self.assertTrue("testcookie" in cookies,
"'testcookie' in cookies")
cookie = cookies["testcookie"]
t = cookiejar.http2time(cookie["expires"])
self.assertTrue(t < time.time(),
"expires time must be smaller then current time")
def test_signed_cookie(self):
"""Test secure cookie"""
obj = self._request("set_signed_cookie")
info = obj.info()
cookies = Cookie.BaseCookie()
cookies.load(info["Set-Cookie"])
self.assertTrue("testcookie" in cookies,
"'testcookie' in cookies")
cookie = cookies["testcookie"]
self.headers["Cookie"] = cookie.OutputString()
obj = self._request("get_signed_cookie")
v = obj.read()
self.assertEqual(v, b"testvalue", "get signed cookie must return 'testvalue'")
def test_get_args(self):
"""Test get request arguments"""
params = {"arg1": "test1", "arg2": "test2"}
params = urlencode(params, doseq=True)
path = "get_args?%s" % params
obj = self._request(path)
data = obj.read()
if not isinstance(data, STR):
data = data.decode()
data = loads(data)
self.assertEqual(params, urlencode(data, doseq=True))
def test_post_args(self):
"""Test post request arguments"""
params = {"arg1": "test1", "arg2": "test2"}
params = urlencode(params, doseq=True)
obj = self._request("post_args", data=params.encode("utf-8"))
data = obj.read()
if not isinstance(data, STR):
data = data.decode()
data = loads(data)
self.assertEqual(params, urlencode(data, doseq=True))
def test_post_fileupload(self):
"""Test file upload"""
def escape_quote(s):
return s.replace('"', '\\"')
def create_file():
f = BytesIO()
for i in range(1024): # @UnusedVariable
f.write(b" " * (1024 * 10))
f.seek(0)
content = f.read()
return content
lines = []
# multipart/form-data Fields
_boundary_chars = string.digits + string.ascii_letters
boundary = ''.join(random.choice(_boundary_chars)
for i in range(30)) # @UnusedVariable
fields = {"arg1": "test1", "arg2": "test2"}
for name, value in fields.items():
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"'.format(escape_quote(name)),
'',
str(value),
))
def add_file(name, filename, content, mimetype):
# multipart/form-data File
f.close()
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(
escape_quote(name), escape_quote(filename)),
'Content-Type: {0}'.format(mimetype),
'',
content,
))
f = open("tests/files/testfile.png", "rb")
content = f.read()
add_file("file1", "testfile.png", content, "image/png")
content = create_file()
add_file("file2", "dummy.txt", content, "text/plain")
lines.extend((
'--{0}--'.format(boundary),
'',
))
if sys.version_info[0] == 3:
_lines = []
for l in lines:
if isinstance(l, str):
l = l.encode("utf-8")
_lines.append(l)
body = b'\r\n'.join(_lines)
else:
body = '\r\n'.join(lines)
self.headers = {
'Content-Type': 'multipart/form-data; boundary={0}'.format(boundary),
'Content-Length': str(len(body)),
}
# Send multipart/form-data
obj = self._request("post_fileupload", data=body)
try:
data = obj.read()
except Exception:
data = type(obj)
self.assertEqual(data, b"OK", data)
def run():
return test.run(SETTINGS, MiddlewareTest)
if __name__ == "__main__":
run()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
import unittest
from mock import Mock, create_autospec, patch
from apache.aurora.client.cli.context import AuroraCommandContext
from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI
from apache.aurora.common.aurora_job_key import AuroraJobKey
from apache.aurora.common.cluster import Cluster
from apache.aurora.common.clusters import CLUSTERS, Clusters
from ...api_util import SchedulerProxyApiSpec, SchedulerThriftApiSpec
from gen.apache.aurora.api.constants import ACTIVE_STATES, CURRENT_API_VERSION
from gen.apache.aurora.api.ttypes import (
AssignedTask,
ExecutorConfig,
Identity,
JobKey,
Response,
ResponseCode,
ResponseDetail,
Result,
ScheduledTask,
ScheduleStatus,
ScheduleStatusResult,
ServerInfo,
TaskConfig,
TaskEvent,
TaskQuery
)
def mock_verb_options(verb):
# Build a mock options object for the verb, applying each option's declared default value.
def opt_name(opt):
return opt.name.lstrip('--').replace('-', '_')
def name_or_dest(opt):
"""Prefers 'dest' if available otherwise defaults to name."""
return opt.kwargs.get('dest') if 'dest' in opt.kwargs else opt_name(opt)
options = Mock(
spec_set=[name_or_dest(opt) for opt in verb.get_options()]
)
# Apply default values to options.
for opt in verb.get_options():
if 'default' in opt.kwargs:
setattr(
options,
name_or_dest(opt),
opt.kwargs.get('default'))
return options
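# Illustrative usage sketch (assumption, not part of the original module): for
# a verb object exposing get_options(), the helper above yields a Mock whose
# attributes mirror the option names/dests with their declared defaults, e.g.:
#
#     options = mock_verb_options(some_verb)         # `some_verb` is hypothetical
#     options.instance_spec = 'west/bozo/test/hello/0-19'
#     some_verb.execute(fake_context_with(options))  # `fake_context_with` is hypothetical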
class FakeAuroraCommandContext(AuroraCommandContext):
def __init__(self):
super(FakeAuroraCommandContext, self).__init__()
self.status = []
self.fake_api = self.create_mock_api()
self.task_result = []
self.out = []
self.err = []
self.config = None
def get_api(self, cluster):
return self.fake_api
@classmethod
def create_mock_api(cls):
"""Builds up a mock API object, with a mock SchedulerProxy.
Returns the API and the proxy"""
mock_scheduler_proxy = create_autospec(spec=SchedulerProxyApiSpec, instance=True)
mock_scheduler_proxy.url = "http://something_or_other"
mock_scheduler_proxy.scheduler_client.return_value = mock_scheduler_proxy
mock_api = create_autospec(spec=HookedAuroraClientAPI)
mock_api.scheduler_proxy = mock_scheduler_proxy
return mock_api
def print_out(self, msg, indent=0):
indent_str = " " * indent
self.out.append("%s%s" % (indent_str, msg))
def print_err(self, msg, indent=0):
indent_str = " " * indent
self.err.append("%s%s" % (indent_str, msg))
def get_job_config(self, jobkey, config_file):
if not self.config:
return super(FakeAuroraCommandContext, self).get_job_config(jobkey, config_file)
else:
return self.config
def get_out(self):
return self.out
def get_out_str(self):
return '\n'.join(self.out)
def get_err(self):
return self.err
def add_expected_status_query_result(self, expected_result):
self.add_task_result(expected_result)
self.fake_api.check_status.side_effect = self.task_result
def add_expected_query_result(self, expected_result, job_key=None):
self.add_task_result(expected_result)
self.fake_api.query_no_configs.side_effect = self.task_result
if job_key:
self.fake_api.build_query.return_value = TaskQuery(
jobKeys=[job_key.to_thrift()], statuses=ACTIVE_STATES)
def add_task_result(self, expected_result):
self.task_result.append(expected_result)
# each call adds an expected query result, in order.
self.fake_api.scheduler_proxy.getTasksWithoutConfigs.side_effect = self.task_result
def add_config(self, config):
self.config = config
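# Illustrative usage sketch (assumption, not part of the original helpers): a
# command test typically builds a FakeAuroraCommandContext, seeds it with a
# canned scheduler response, runs the command with the fake context patched in,
# and then inspects the captured output, e.g.:
#
#     context = FakeAuroraCommandContext()
#     context.add_expected_status_query_result(
#         AuroraClientCommandTest.create_query_call_result())
#     # ... invoke the command under test with `context` ...
#     assert context.get_out_str() != ''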
class AuroraClientCommandTest(unittest.TestCase):
FAKE_TIME = 42131
def setUp(self):
patcher = patch('webbrowser.open_new_tab')
self.mock_webbrowser = patcher.start()
self.addCleanup(patcher.stop)
def run(self, result=None):
# Since CLUSTERS is a global value that evaluates code on import this is the best way to
# ensure it does not pollute any tests.
with CLUSTERS.patch(self.TEST_CLUSTERS._clusters.values()):
super(AuroraClientCommandTest, self).run(result)
@classmethod
def create_blank_response(cls, code, msg):
return Response(
responseCode=code,
details=[ResponseDetail(message=msg)],
serverInfo=ServerInfo(thriftAPIVersion=CURRENT_API_VERSION.major))
@classmethod
def create_simple_success_response(cls):
return cls.create_blank_response(ResponseCode.OK, 'OK')
@classmethod
def create_error_response(cls):
return cls.create_blank_response(ResponseCode.ERROR, 'Whoops')
@classmethod
def create_mock_api(cls):
"""Builds up a mock API object, with a mock SchedulerProxy"""
mock_scheduler = create_autospec(spec=SchedulerThriftApiSpec, instance=True)
mock_scheduler.url = "http://something_or_other"
mock_scheduler_client = create_autospec(spec=SchedulerProxyApiSpec, instance=True)
mock_scheduler_client.url = "http://something_or_other"
mock_api = create_autospec(spec=HookedAuroraClientAPI, instance=True)
mock_api.scheduler_proxy = mock_scheduler_client
return mock_api, mock_scheduler_client
@classmethod
def create_mock_api_factory(cls):
"""Create a collection of mocks for a test that wants to mock out the client API
by patching the api factory."""
mock_api, mock_scheduler_client = cls.create_mock_api()
mock_api_factory = lambda: mock_api
return mock_api_factory, mock_scheduler_client
@classmethod
def create_query_call_result(cls, task=None):
status_response = cls.create_empty_task_result()
if task is None:
for i in range(20):
status_response.result.scheduleStatusResult.tasks.append(cls.create_scheduled_task(i))
else:
status_response.result.scheduleStatusResult.tasks.append(task)
return status_response
@classmethod
def create_empty_task_result(cls):
status_response = cls.create_simple_success_response()
status_response.result = Result(scheduleStatusResult=ScheduleStatusResult(tasks=[]))
return status_response
@classmethod
def create_scheduled_task(cls, instance_id, status=ScheduleStatus.RUNNING,
task_id=None, initial_time=None):
task = ScheduledTask(
status=status,
assignedTask=AssignedTask(
instanceId=instance_id,
taskId=task_id or "Task%s" % instance_id,
slaveId="Slave%s" % instance_id,
slaveHost="Slave%s" % instance_id,
task=TaskConfig()),
taskEvents=[TaskEvent(timestamp=initial_time or 1000)])
return task
@classmethod
def create_task_config(cls, name):
return TaskConfig(
maxTaskFailures=1,
executorConfig=ExecutorConfig(data='fake data'),
metadata=[],
job=JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV, name=name),
owner=Identity(role=cls.TEST_ROLE),
environment=cls.TEST_ENV,
jobName=name,
numCpus=2,
ramMb=2,
diskMb=2)
@classmethod
def create_scheduled_tasks(cls):
tasks = []
for name in ['foo', 'bar', 'baz']:
task = ScheduledTask(
failureCount=0,
assignedTask=AssignedTask(
taskId=1287391823,
slaveHost='slavehost',
task=cls.create_task_config(name),
instanceId=4237894,
assignedPorts={}),
status=ScheduleStatus.RUNNING,
taskEvents=[TaskEvent(
timestamp=28234726395,
status=ScheduleStatus.RUNNING,
message="Hi there")])
tasks.append(task)
return tasks
@classmethod
def setup_get_tasks_status_calls(cls, scheduler):
status_response = cls.create_query_call_result()
scheduler.getTasksWithoutConfigs.return_value = status_response
@classmethod
def fake_time(cls, ignored):
"""Utility function used for faking time to speed up tests."""
cls.FAKE_TIME += 2
return cls.FAKE_TIME
CONFIG_BASE = """
HELLO_WORLD = Job(
name = '%(job)s',
role = '%(role)s',
cluster = '%(cluster)s',
environment = '%(env)s',
instances = 20,
%(inner)s
update_config = UpdateConfig(
batch_size = 1,
restart_threshold = 60,
watch_secs = 45,
max_per_shard_failures = 2,
),
task = Task(
name = 'test',
processes = [Process(name = 'hello_world', cmdline = 'echo {{thermos.ports[http]}}')],
resources = Resources(cpu = 0.1, ram = 64 * MB, disk = 64 * MB),
)
)
jobs = [HELLO_WORLD]
"""
CRON_CONFIG_BASE = """
HELLO_WORLD = Job(
name = '%(job)s',
role = '%(role)s',
cluster = '%(cluster)s',
environment = '%(env)s',
cron_schedule = '*/5 * * * *',
%(inner)s
task = SimpleTask('test', 'echo test')
)
jobs = [HELLO_WORLD]
"""
UNBOUND_CONFIG = textwrap.dedent("""\
HELLO_WORLD = Job(
name = '%(job)s',
role = '%(role)s',
cluster = '{{cluster_binding}}',
environment = '%(env)s',
instances = '{{instances_binding}}',
update_config = UpdateConfig(
batch_size = "{{TEST_BATCH}}",
restart_threshold = 60,
watch_secs = 45,
max_per_shard_failures = 2,
),
task = Task(
name = 'test',
processes = [Process(
name = 'hello_world',
cmdline = 'echo {{thermos.ports[http]}} {{flags_binding}}'
)],
resources = Resources(cpu = 0.1, ram = 64 * MB, disk = 64 * MB),
)
)
jobs = [HELLO_WORLD]
""")
TEST_ROLE = 'bozo'
TEST_ENV = 'test'
TEST_JOB = 'hello'
TEST_CLUSTER = 'west'
TEST_JOBSPEC = 'west/bozo/test/hello'
TEST_JOBKEY = AuroraJobKey('west', 'bozo', 'test', 'hello')
TEST_CLUSTERS = Clusters([Cluster(
name=TEST_CLUSTER,
zk='zookeeper.example.com',
scheduler_zk_path='/foo/bar',
auth_mechanism='UNAUTHENTICATED')])
@classmethod
def get_instance_spec(cls, instances_spec):
"""Create a job instance spec string"""
return '%s/%s' % (cls.TEST_JOBSPEC, instances_spec)
@classmethod
def get_test_config(cls, base, cluster, role, env, job, inner=''):
"""Create a config from the template"""
return base % {'job': job, 'role': role, 'env': env, 'cluster': cluster, 'inner': inner}
@classmethod
def get_unbound_test_config(cls, role=None, env=None, job=None):
result = cls.UNBOUND_CONFIG % {'job': job or cls.TEST_JOB, 'role': role or cls.TEST_ROLE,
'env': env or cls.TEST_ENV}
return result
@classmethod
def get_valid_config(cls):
return cls.get_test_config(
cls.CONFIG_BASE,
cls.TEST_CLUSTER,
cls.TEST_ROLE,
cls.TEST_ENV,
cls.TEST_JOB)
@classmethod
def get_valid_cron_config(cls):
return cls.get_test_config(
cls.CRON_CONFIG_BASE,
cls.TEST_CLUSTER,
cls.TEST_ROLE,
cls.TEST_ENV,
cls.TEST_JOB)
@classmethod
def get_invalid_config(cls, bad_clause):
return cls.get_test_config(
cls.CONFIG_BASE,
cls.TEST_CLUSTER,
cls.TEST_ROLE,
cls.TEST_ENV,
cls.TEST_JOB,
bad_clause)
@classmethod
def get_invalid_cron_config(cls, bad_clause):
return cls.get_test_config(
cls.CRON_CONFIG_BASE,
cls.TEST_CLUSTER,
cls.TEST_ROLE,
cls.TEST_ENV,
cls.TEST_JOB,
bad_clause)
@classmethod
def assert_lock_message(cls, context):
assert [line for line in context.get_err() if line == "\t%s" % context.LOCK_ERROR_MSG]
class IOMock(object):
def __init__(self):
self.out = []
def put(self, s):
self.out.append(s)
def get(self):
return self.out
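# Illustrative sketch (assumption, not part of the original file): the response
# factories above compose when a test needs a scheduler reply containing a
# single RUNNING task, e.g.:
#
#     task = AuroraClientCommandTest.create_scheduled_task(0)
#     response = AuroraClientCommandTest.create_query_call_result(task)
#     mock_scheduler_proxy.getTasksWithoutConfigs.return_value = response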
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import uuid
from datetime import datetime
from typing import Dict, List, Optional, TYPE_CHECKING
from msrest import Serializer
from azure.core.async_paging import AsyncItemPaged
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.tracing.decorator import distributed_trace
from azure.core import MatchConditions
from .._version import SDK_MONIKER
from .._utils import (
prep_if_match,
prep_if_none_match
)
from .._generated.aio import AzureDigitalTwinsAPI
from .._generated.models import (
QuerySpecification,
DigitalTwinsAddOptions,
DigitalTwinsDeleteOptions,
DigitalTwinsUpdateOptions,
DigitalTwinsUpdateComponentOptions,
DigitalTwinsDeleteRelationshipOptions,
DigitalTwinsUpdateRelationshipOptions,
DigitalTwinsAddRelationshipOptions,
DigitalTwinsModelData
)
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from .._generated.models import (
IncomingRelationship,
DigitalTwinsEventRoute
)
class DigitalTwinsClient(object): # pylint: disable=too-many-public-methods
"""Creates an instance of the Digital Twins client.
:param str endpoint: The URL endpoint of an Azure Digital Twins instance
:param ~azure.core.credentials_async.AsyncTokenCredential credential:
A credential to authenticate requests to the service.
"""
def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs) -> None:
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
self._client = AzureDigitalTwinsAPI(
credential=credential,
base_url=endpoint,
sdk_moniker=SDK_MONIKER,
**kwargs
)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "DigitalTwinsClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
@distributed_trace_async
async def get_digital_twin(self, digital_twin_id: str, **kwargs) -> Dict[str, object]:
"""Get a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:return: Dictionary containing the twin.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If the digital twin doesn't exist.
"""
return await self._client.digital_twins.get_by_id(
digital_twin_id,
**kwargs
)
@distributed_trace_async
async def upsert_digital_twin(
self,
digital_twin_id: str,
digital_twin: Dict[str, object],
**kwargs
) -> Dict[str, object]:
"""Create or update a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param Dict[str,object] digital_twin:
Dictionary containing the twin to create or update.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: Dictionary containing the created or updated twin.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceExistsError:
If the digital twin already exists.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_none_match, error_map = prep_if_none_match(etag, match_condition)
if if_none_match:
options = DigitalTwinsAddOptions(if_none_match=if_none_match)
return await self._client.digital_twins.add(
digital_twin_id,
digital_twin,
digital_twins_add_options=options,
error_map=error_map,
**kwargs
)
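# Illustrative usage sketch (assumption, not part of the SDK source): a caller
# that only wants to create the twin when it does not already exist can rely on
# the `match_condition` keyword handled above, e.g.:
#
#     twin = {"$metadata": {"$model": "dtmi:example:Room;1"}, "Temperature": 21}
#     created = await client.upsert_digital_twin(
#         "room-1", twin, match_condition=MatchConditions.IfMissing)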
@distributed_trace_async
async def update_digital_twin(
self,
digital_twin_id: str,
json_patch: List[Dict[str, object]],
**kwargs
) -> None:
"""Update a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param List[Dict[str,object]] json_patch: An update specification described by JSON Patch.
Updates to property values and $model elements may happen in the same request.
Operations are limited to add, replace and remove.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If there is no digital twin with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateOptions(if_match=if_match)
return await self._client.digital_twins.update(
digital_twin_id,
json_patch,
digital_twins_update_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def delete_digital_twin(
self,
digital_twin_id: str,
**kwargs
) -> None:
"""Delete a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If there is no digital twin with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsDeleteOptions(if_match=if_match)
return await self._client.digital_twins.delete(
digital_twin_id,
digital_twins_delete_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def get_component(self, digital_twin_id: str, component_name: str, **kwargs) -> Dict[str, object]:
"""Get a component on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The component being retrieved.
:return: Dictionary containing the component.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin with the provided ID or the component name is invalid.
"""
return await self._client.digital_twins.get_component(
digital_twin_id,
component_name,
**kwargs
)
@distributed_trace_async
async def update_component(
self,
digital_twin_id: str,
component_name: str,
json_patch: List[Dict[str, object]],
**kwargs
) -> None:
"""Update properties of a component on a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The component being updated.
:param List[Dict[str,object]] json_patch: An update specification described by JSON Patch.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin with the provided ID or the component name is invalid.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateComponentOptions(if_match=if_match)
return await self._client.digital_twins.update_component(
digital_twin_id,
component_name,
patch_document=json_patch,
digital_twins_update_component_options=options,
error_map=error_map,
**kwargs
)
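# Illustrative note (assumption, not part of the SDK source): `json_patch` is a
# JSON Patch (RFC 6902) document, i.e. a list of operation dictionaries, e.g.:
#
#     patch = [{"op": "replace", "path": "/Temperature", "value": 22}]
#     await client.update_component("room-1", "thermostat", patch)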
@distributed_trace_async
async def get_relationship(
self,
digital_twin_id: str,
relationship_id: str,
**kwargs
) -> Dict[str, object]:
"""Get a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:return: Dictionary containing the relationship.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
return await self._client.digital_twins.get_relationship_by_id(
digital_twin_id,
relationship_id,
**kwargs
)
@distributed_trace_async
async def upsert_relationship(
self,
digital_twin_id: str,
relationship_id: str,
relationship: Dict[str, object],
**kwargs
) -> Dict[str, object]:
"""Create or update a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:param Dict[str,object] relationship: Dictionary containing the relationship.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: The created or updated relationship.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin, target digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_none_match, error_map = prep_if_none_match(etag, match_condition)
if if_none_match:
options = DigitalTwinsAddRelationshipOptions(if_none_match=if_none_match)
return await self._client.digital_twins.add_relationship(
id=digital_twin_id,
relationship_id=relationship_id,
relationship=relationship,
digital_twins_add_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def update_relationship(
self,
digital_twin_id: str,
relationship_id: str,
json_patch: List[Dict[str, object]],
**kwargs
) -> None:
"""Updates the properties of a relationship on a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:param List[Dict[str,object]] json_patch: JSON Patch description of the update
to the relationship properties.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateRelationshipOptions(if_match=if_match)
return await self._client.digital_twins.update_relationship(
id=digital_twin_id,
relationship_id=relationship_id,
patch_document=json_patch,
digital_twins_update_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def delete_relationship(
self,
digital_twin_id: str,
relationship_id: str,
**kwargs
) -> None:
"""Delete a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to delete.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsDeleteRelationshipOptions(if_match=if_match)
return await self._client.digital_twins.delete_relationship(
digital_twin_id,
relationship_id,
digital_twins_delete_relationship_options=options,
error_map=error_map,
**kwargs
)
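    # Usage sketch (hypothetical names, not SDK source): optimistic concurrency when
    # deleting a relationship. Assuming `client` is an instance of this client class and
    # the relationship dict exposes its etag under "$etag", MatchConditions.IfNotModified
    # turns the etag into an If-Match precondition via prep_if_match above.
    #
    #     rel = await client.get_relationship(twin_id, "contains-room01")
    #     await client.delete_relationship(
    #         twin_id,
    #         "contains-room01",
    #         etag=rel.get("$etag"),
    #         match_condition=MatchConditions.IfNotModified,
    #     )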
@distributed_trace
def list_relationships(
self,
digital_twin_id: str,
relationship_id: Optional[str] = None,
**kwargs
) -> AsyncItemPaged[Dict[str, object]]:
"""Retrieve relationships for a digital twin.
:param str digital_twin_id: The ID of the digital twin.
        :param str relationship_id: The ID of the relationship to
            get (if None, all relationships will be retrieved).
:return: An iterator instance of list of relationships.
:rtype: ~azure.core.async_paging.AsyncItemPaged[Dict[str,object]]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
return self._client.digital_twins.list_relationships(
digital_twin_id,
relationship_name=relationship_id,
**kwargs
)
@distributed_trace
def list_incoming_relationships(
self,
digital_twin_id: str,
**kwargs
) -> AsyncItemPaged['IncomingRelationship']:
"""Retrieve all incoming relationships for a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:return: An iterator instance of list of incoming relationships.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.digitaltwins.core.IncomingRelationship]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
return self._client.digital_twins.list_incoming_relationships(
digital_twin_id,
**kwargs
)
@distributed_trace_async
async def publish_telemetry(
self,
digital_twin_id: str,
telemetry: object,
**kwargs
) -> None:
"""Publish telemetry from a digital twin, which is then consumed by
one or many destination endpoints (subscribers) defined under.
:param str digital_twin_id: The ID of the digital twin
:param object telemetry: The telemetry data to be sent
:keyword str message_id: The message ID. If not specified, a UUID will be generated.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
message_id = kwargs.pop('message_id', None) or str(uuid.uuid4())
timestamp = Serializer.serialize_iso(datetime.utcnow())
return await self._client.digital_twins.send_telemetry(
digital_twin_id,
message_id=message_id,
telemetry=telemetry,
telemetry_source_time=timestamp,
**kwargs
)
@distributed_trace_async
async def publish_component_telemetry(
self,
digital_twin_id: str,
component_name: str,
telemetry: object,
**kwargs
) -> None:
"""Publish telemetry from a digital twin's component, which is then consumed
by one or many destination endpoints (subscribers) defined under.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The name of the DTDL component.
:param object telemetry: The telemetry data to be sent.
:keyword str message_id: The message ID. If not specified, a UUID will be generated.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID or the component name is invalid.
"""
message_id = kwargs.pop('message_id', None) or str(uuid.uuid4())
timestamp = Serializer.serialize_iso(datetime.utcnow())
return await self._client.digital_twins.send_component_telemetry(
digital_twin_id,
component_name,
message_id=message_id,
telemetry=telemetry,
telemetry_source_time=timestamp,
**kwargs
)
@distributed_trace_async
async def get_model(self, model_id: str, **kwargs) -> DigitalTwinsModelData:
"""Get a model, including the model metadata and the model definition.
:param str model_id: The ID of the model.
:keyword bool include_model_definition: Include the model definition
as part of the result. The default value is False.
:return: The model data.
:rtype: ~azure.digitaltwins.core.DigitalTwinsModelData
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
model with the provided ID.
"""
include_model_definition = kwargs.pop("include_model_definition", False)
return await self._client.digital_twin_models.get_by_id(
model_id,
include_model_definition=include_model_definition,
**kwargs
)
@distributed_trace
def list_models(
self,
dependencies_for: Optional[List[str]] = None,
**kwargs
) -> AsyncItemPaged[DigitalTwinsModelData]:
"""Get the list of models.
:param List[str] dependencies_for: The model IDs to have dependencies retrieved.
If omitted, all models are retrieved.
:keyword bool include_model_definition: Include the model definition
as part of the results. The default value is False.
:keyword int results_per_page: The maximum number of items to retrieve per request.
The server may choose to return less than the requested max.
:return: An iterator instance of list of model data.
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.digitaltwins.core.DigitalTwinsModelData]
:raises ~azure.core.exceptions.HttpResponseError:
"""
include_model_definition = kwargs.pop('include_model_definition', False)
results_per_page = kwargs.pop('results_per_page', None)
digital_twin_models_list_options = None
if results_per_page is not None:
digital_twin_models_list_options = {'max_item_count': results_per_page}
return self._client.digital_twin_models.list(
dependencies_for=dependencies_for,
include_model_definition=include_model_definition,
digital_twin_models_list_options=digital_twin_models_list_options,
**kwargs
)
@distributed_trace_async
async def create_models(self, dtdl_models: List[object], **kwargs) -> List[DigitalTwinsModelData]:
"""Create one or more models. When any error occurs, no models are uploaded.
        :param List[object] dtdl_models: The set of models to create.
Each dict corresponds to exactly one model.
:return: The list of created models
:rtype: List[~azure.digitaltwins.core.DigitalTwinsModelData]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceExistsError: One or more of
the provided models already exist.
"""
return await self._client.digital_twin_models.add(
dtdl_models,
**kwargs
)
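    # Usage sketch (hypothetical model and client names, not SDK source): `create_models`
    # takes a list of DTDL dictionaries, each describing one model.
    #
    #     room_model = {
    #         "@id": "dtmi:example:Room;1",
    #         "@type": "Interface",
    #         "@context": "dtmi:dtdl:context;2",
    #         "displayName": "Room",
    #         "contents": [
    #             {"@type": "Property", "name": "Temperature", "schema": "double"},
    #         ],
    #     }
    #     created = await client.create_models([room_model])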
@distributed_trace_async
async def decommission_model(self, model_id: str, **kwargs) -> None:
"""Decommissions a model.
:param str model_id: The ID for the model. The ID is globally unique and case sensitive.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no model
with the provided ID.
"""
json_patch = [{'op': 'replace', 'path': '/decommissioned', 'value': True}]
return await self._client.digital_twin_models.update(
model_id,
json_patch,
**kwargs
)
@distributed_trace_async
async def delete_model(self, model_id: str, **kwargs) -> None:
"""Delete a model.
:param str model_id: The ID of the model to delete.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no model
with the provided ID.
:raises ~azure.core.exceptions.ResourceExistsError: There are dependencies
on the model that prevent it from being deleted.
"""
return await self._client.digital_twin_models.delete(
model_id,
**kwargs
)
@distributed_trace_async
async def get_event_route(self, event_route_id: str, **kwargs) -> 'DigitalTwinsEventRoute':
"""Get an event route.
:param str event_route_id: The ID of the event route.
:return: The event route object.
:rtype: ~azure.digitaltwins.core.DigitalTwinsEventRoute
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no
event route with the provided ID.
"""
return await self._client.event_routes.get_by_id(
event_route_id,
**kwargs
)
@distributed_trace
def list_event_routes(self, **kwargs) -> AsyncItemPaged['DigitalTwinsEventRoute']:
"""Retrieves all event routes.
:keyword int results_per_page: The maximum number of items to retrieve per request.
The server may choose to return less than the requested max.
:return: An iterator instance of event routes.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.digitaltwins.core.DigitalTwinsEventRoute]
:raises ~azure.core.exceptions.HttpResponseError:
"""
event_routes_list_options = None
results_per_page = kwargs.pop('results_per_page', None)
if results_per_page is not None:
event_routes_list_options = {'max_item_count': results_per_page}
return self._client.event_routes.list(
event_routes_list_options=event_routes_list_options,
**kwargs
)
@distributed_trace_async
async def upsert_event_route(
self,
event_route_id: str,
event_route: 'DigitalTwinsEventRoute',
**kwargs
) -> None:
"""Create or update an event route.
:param str event_route_id: The ID of the event route to create or update.
:param ~azure.digitaltwins.core.DigitalTwinsEventRoute event_route: The event route data.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
return await self._client.event_routes.add(
event_route_id,
event_route=event_route,
**kwargs
)
@distributed_trace_async
async def delete_event_route(self, event_route_id: str, **kwargs) -> None:
"""Delete an event route.
:param str event_route_id: The ID of the event route to delete.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no
event route with the provided ID.
"""
return await self._client.event_routes.delete(
event_route_id,
**kwargs
)
@distributed_trace
def query_twins(self, query_expression: str, **kwargs) -> AsyncItemPaged[Dict[str, object]]:
"""Query for digital twins.
        Note that there may be a delay before changes in your instance are reflected in queries.
For more details on query limitations, see
https://docs.microsoft.com/azure/digital-twins/how-to-query-graph#query-limitations
:param str query_expression: The query expression to execute.
:return: An iterable of query results.
:rtype: ~azure.core.async_paging.AsyncItemPaged[Dict[str, object]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
async def extract_data(deserialized):
list_of_elem = deserialized.value
return deserialized.continuation_token or None, iter(list_of_elem)
async def get_next(continuation_token=None):
query_spec = QuerySpecification(
query=query_expression,
continuation_token=continuation_token)
return await self._client.query.query_twins(query_spec, **kwargs)
return AsyncItemPaged(
get_next,
extract_data
)
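    # Usage sketch (hypothetical names, not SDK source): the returned AsyncItemPaged is an
    # asynchronous iterator, so results are consumed with `async for`; paging and
    # continuation tokens are handled by get_next/extract_data above.
    #
    #     query = "SELECT * FROM digitaltwins WHERE IS_OF_MODEL('dtmi:example:Room;1')"
    #     async for twin in client.query_twins(query):
    #         print(twin["$dtId"])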
|
|
"""Support to interface with Sonos players."""
from __future__ import annotations
import datetime
import logging
from typing import Any
import urllib.parse
from soco import alarms
from soco.core import (
MUSIC_SRC_LINE_IN,
MUSIC_SRC_RADIO,
PLAY_MODE_BY_MEANING,
PLAY_MODES,
)
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TRACK,
REPEAT_MODE_ALL,
REPEAT_MODE_OFF,
REPEAT_MODE_ONE,
SUPPORT_BROWSE_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.components.plex.const import PLEX_URI_SCHEME
from homeassistant.components.plex.services import play_on_sonos
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TIME, STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.helpers import config_validation as cv, entity_platform, service
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.network import is_internal_request
from .const import (
DATA_SONOS,
DOMAIN as SONOS_DOMAIN,
MEDIA_TYPES_TO_SONOS,
PLAYABLE_MEDIA_TYPES,
SONOS_CREATE_MEDIA_PLAYER,
SONOS_STATE_PLAYING,
SONOS_STATE_TRANSITIONING,
SOURCE_LINEIN,
SOURCE_TV,
)
from .entity import SonosEntity
from .helpers import soco_error
from .media_browser import build_item_response, get_media, library_payload
from .speaker import SonosMedia, SonosSpeaker
_LOGGER = logging.getLogger(__name__)
SUPPORT_SONOS = (
SUPPORT_BROWSE_MEDIA
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_REPEAT_SET
| SUPPORT_SEEK
| SUPPORT_SELECT_SOURCE
| SUPPORT_SHUFFLE_SET
| SUPPORT_STOP
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
)
VOLUME_INCREMENT = 2
REPEAT_TO_SONOS = {
REPEAT_MODE_OFF: False,
REPEAT_MODE_ALL: True,
REPEAT_MODE_ONE: "ONE",
}
SONOS_TO_REPEAT = {meaning: mode for mode, meaning in REPEAT_TO_SONOS.items()}
ATTR_SONOS_GROUP = "sonos_group"
UPNP_ERRORS_TO_IGNORE = ["701", "711", "712"]
SERVICE_JOIN = "join"
SERVICE_UNJOIN = "unjoin"
SERVICE_SNAPSHOT = "snapshot"
SERVICE_RESTORE = "restore"
SERVICE_SET_TIMER = "set_sleep_timer"
SERVICE_CLEAR_TIMER = "clear_sleep_timer"
SERVICE_UPDATE_ALARM = "update_alarm"
SERVICE_PLAY_QUEUE = "play_queue"
SERVICE_REMOVE_FROM_QUEUE = "remove_from_queue"
ATTR_SLEEP_TIME = "sleep_time"
ATTR_ALARM_ID = "alarm_id"
ATTR_VOLUME = "volume"
ATTR_ENABLED = "enabled"
ATTR_INCLUDE_LINKED_ZONES = "include_linked_zones"
ATTR_MASTER = "master"
ATTR_WITH_GROUP = "with_group"
ATTR_QUEUE_POSITION = "queue_position"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Sonos from a config entry."""
platform = entity_platform.async_get_current_platform()
@callback
def async_create_entities(speaker: SonosSpeaker) -> None:
"""Handle device discovery and create entities."""
_LOGGER.debug("Creating media_player on %s", speaker.zone_name)
async_add_entities([SonosMediaPlayerEntity(speaker)])
@service.verify_domain_control(hass, SONOS_DOMAIN)
async def async_service_handle(service_call: ServiceCall) -> None:
"""Handle dispatched services."""
assert platform is not None
entities = await platform.async_extract_from_service(service_call)
if not entities:
return
speakers = []
for entity in entities:
assert isinstance(entity, SonosMediaPlayerEntity)
speakers.append(entity.speaker)
if service_call.service == SERVICE_JOIN:
master = platform.entities.get(service_call.data[ATTR_MASTER])
if master:
await SonosSpeaker.join_multi(hass, master.speaker, speakers) # type: ignore[arg-type]
else:
_LOGGER.error(
"Invalid master specified for join service: %s",
service_call.data[ATTR_MASTER],
)
elif service_call.service == SERVICE_UNJOIN:
await SonosSpeaker.unjoin_multi(hass, speakers) # type: ignore[arg-type]
elif service_call.service == SERVICE_SNAPSHOT:
await SonosSpeaker.snapshot_multi(
hass, speakers, service_call.data[ATTR_WITH_GROUP] # type: ignore[arg-type]
)
elif service_call.service == SERVICE_RESTORE:
await SonosSpeaker.restore_multi(
hass, speakers, service_call.data[ATTR_WITH_GROUP] # type: ignore[arg-type]
)
config_entry.async_on_unload(
async_dispatcher_connect(hass, SONOS_CREATE_MEDIA_PLAYER, async_create_entities)
)
hass.services.async_register(
SONOS_DOMAIN,
SERVICE_JOIN,
async_service_handle,
cv.make_entity_service_schema({vol.Required(ATTR_MASTER): cv.entity_id}),
)
hass.services.async_register(
SONOS_DOMAIN,
SERVICE_UNJOIN,
async_service_handle,
cv.make_entity_service_schema({}),
)
join_unjoin_schema = cv.make_entity_service_schema(
{vol.Optional(ATTR_WITH_GROUP, default=True): cv.boolean}
)
hass.services.async_register(
SONOS_DOMAIN, SERVICE_SNAPSHOT, async_service_handle, join_unjoin_schema
)
hass.services.async_register(
SONOS_DOMAIN, SERVICE_RESTORE, async_service_handle, join_unjoin_schema
)
platform.async_register_entity_service( # type: ignore
SERVICE_SET_TIMER,
{
vol.Required(ATTR_SLEEP_TIME): vol.All(
vol.Coerce(int), vol.Range(min=0, max=86399)
)
},
"set_sleep_timer",
)
platform.async_register_entity_service(SERVICE_CLEAR_TIMER, {}, "clear_sleep_timer") # type: ignore
platform.async_register_entity_service( # type: ignore
SERVICE_UPDATE_ALARM,
{
vol.Required(ATTR_ALARM_ID): cv.positive_int,
vol.Optional(ATTR_TIME): cv.time,
vol.Optional(ATTR_VOLUME): cv.small_float,
vol.Optional(ATTR_ENABLED): cv.boolean,
vol.Optional(ATTR_INCLUDE_LINKED_ZONES): cv.boolean,
},
"set_alarm",
)
platform.async_register_entity_service( # type: ignore
SERVICE_PLAY_QUEUE,
{vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
"play_queue",
)
platform.async_register_entity_service( # type: ignore
SERVICE_REMOVE_FROM_QUEUE,
{vol.Optional(ATTR_QUEUE_POSITION): cv.positive_int},
"remove_from_queue",
)
class SonosMediaPlayerEntity(SonosEntity, MediaPlayerEntity):
"""Representation of a Sonos entity."""
_attr_supported_features = SUPPORT_SONOS
_attr_media_content_type = MEDIA_TYPE_MUSIC
def __init__(self, speaker: SonosSpeaker) -> None:
"""Initialize the media player entity."""
super().__init__(speaker)
self._attr_unique_id = self.soco.uid
self._attr_name = self.speaker.zone_name
@property
def coordinator(self) -> SonosSpeaker:
"""Return the current coordinator SonosSpeaker."""
return self.speaker.coordinator or self.speaker
def __hash__(self) -> int:
"""Return a hash of self."""
return hash(self.unique_id)
@property # type: ignore[misc]
def state(self) -> str:
"""Return the state of the entity."""
if self.media.playback_status in (
"PAUSED_PLAYBACK",
"STOPPED",
):
# Sonos can consider itself "paused" but without having media loaded
# (happens if playing Spotify and via Spotify app you pick another device to play on)
if self.media.title is None:
return STATE_IDLE
return STATE_PAUSED
if self.media.playback_status in (
SONOS_STATE_PLAYING,
SONOS_STATE_TRANSITIONING,
):
return STATE_PLAYING
return STATE_IDLE
async def _async_poll(self) -> None:
"""Retrieve latest state by polling."""
await self.hass.data[DATA_SONOS].favorites[
self.speaker.household_id
].async_poll()
await self.hass.async_add_executor_job(self._update)
def _update(self) -> None:
"""Retrieve latest state by polling."""
self.speaker.update_groups()
self.speaker.update_volume()
if self.speaker.is_coordinator:
self.speaker.update_media()
@property
def volume_level(self) -> float | None:
"""Volume level of the media player (0..1)."""
return self.speaker.volume and self.speaker.volume / 100
@property
def is_volume_muted(self) -> bool | None:
"""Return true if volume is muted."""
return self.speaker.muted
@property # type: ignore[misc]
    def shuffle(self) -> bool | None:
        """Shuffling state."""
        shuffle: bool = PLAY_MODES[self.media.play_mode][0]
return shuffle
@property # type: ignore[misc]
def repeat(self) -> str | None:
"""Return current repeat mode."""
sonos_repeat = PLAY_MODES[self.media.play_mode][1]
return SONOS_TO_REPEAT[sonos_repeat]
@property
def media(self) -> SonosMedia:
"""Return the SonosMedia object from the coordinator speaker."""
return self.coordinator.media
@property # type: ignore[misc]
def media_content_id(self) -> str | None:
"""Content id of current playing media."""
return self.media.uri
@property # type: ignore[misc]
def media_duration(self) -> float | None:
"""Duration of current playing media in seconds."""
return self.media.duration
@property # type: ignore[misc]
def media_position(self) -> float | None:
"""Position of current playing media in seconds."""
return self.media.position
@property # type: ignore[misc]
def media_position_updated_at(self) -> datetime.datetime | None:
"""When was the position of the current playing media valid."""
return self.media.position_updated_at
@property # type: ignore[misc]
def media_image_url(self) -> str | None:
"""Image url of current playing media."""
return self.media.image_url or None
@property # type: ignore[misc]
def media_channel(self) -> str | None:
"""Channel currently playing."""
return self.media.channel or None
@property
def media_playlist(self) -> str | None:
"""Title of playlist currently playing."""
return self.media.playlist_name
@property # type: ignore[misc]
def media_artist(self) -> str | None:
"""Artist of current playing media, music track only."""
return self.media.artist or None
@property # type: ignore[misc]
def media_album_name(self) -> str | None:
"""Album name of current playing media, music track only."""
return self.media.album_name or None
@property # type: ignore[misc]
def media_title(self) -> str | None:
"""Title of current playing media."""
return self.media.title or None
@property # type: ignore[misc]
def source(self) -> str | None:
"""Name of the current input source."""
return self.media.source_name or None
@soco_error()
def volume_up(self) -> None:
"""Volume up media player."""
self.soco.volume += VOLUME_INCREMENT
@soco_error()
def volume_down(self) -> None:
"""Volume down media player."""
self.soco.volume -= VOLUME_INCREMENT
@soco_error()
    def set_volume_level(self, volume: float) -> None:
        """Set volume level, range 0..1."""
        self.soco.volume = int(volume * 100)
@soco_error(UPNP_ERRORS_TO_IGNORE)
    def set_shuffle(self, shuffle: bool) -> None:
"""Enable/Disable shuffle mode."""
sonos_shuffle = shuffle
sonos_repeat = PLAY_MODES[self.media.play_mode][1]
self.coordinator.soco.play_mode = PLAY_MODE_BY_MEANING[
(sonos_shuffle, sonos_repeat)
]
@soco_error(UPNP_ERRORS_TO_IGNORE)
def set_repeat(self, repeat: str) -> None:
"""Set repeat mode."""
sonos_shuffle = PLAY_MODES[self.media.play_mode][0]
sonos_repeat = REPEAT_TO_SONOS[repeat]
self.coordinator.soco.play_mode = PLAY_MODE_BY_MEANING[
(sonos_shuffle, sonos_repeat)
]
@soco_error()
def mute_volume(self, mute: bool) -> None:
"""Mute (true) or unmute (false) media player."""
self.soco.mute = mute
@soco_error()
def select_source(self, source: str) -> None:
"""Select input source."""
soco = self.coordinator.soco
if source == SOURCE_LINEIN:
soco.switch_to_line_in()
elif source == SOURCE_TV:
soco.switch_to_tv()
else:
fav = [fav for fav in self.speaker.favorites if fav.title == source]
if len(fav) == 1:
src = fav.pop()
uri = src.reference.get_uri()
if soco.music_source_from_uri(uri) in [
MUSIC_SRC_RADIO,
MUSIC_SRC_LINE_IN,
]:
soco.play_uri(uri, title=source)
else:
soco.clear_queue()
soco.add_to_queue(src.reference)
soco.play_from_queue(0)
@property # type: ignore[misc]
def source_list(self) -> list[str]:
"""List of available input sources."""
sources = [fav.title for fav in self.speaker.favorites]
model = self.coordinator.model_name.upper()
if "PLAY:5" in model or "CONNECT" in model:
sources += [SOURCE_LINEIN]
elif "PLAYBAR" in model:
sources += [SOURCE_LINEIN, SOURCE_TV]
elif "BEAM" in model or "PLAYBASE" in model:
sources += [SOURCE_TV]
return sources
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_play(self) -> None:
"""Send play command."""
self.coordinator.soco.play()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_stop(self) -> None:
"""Send stop command."""
self.coordinator.soco.stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_pause(self) -> None:
"""Send pause command."""
self.coordinator.soco.pause()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_next_track(self) -> None:
"""Send next track command."""
self.coordinator.soco.next()
@soco_error(UPNP_ERRORS_TO_IGNORE)
def media_previous_track(self) -> None:
"""Send next track command."""
self.coordinator.soco.previous()
@soco_error(UPNP_ERRORS_TO_IGNORE)
    def media_seek(self, position: float) -> None:
"""Send seek command."""
self.coordinator.soco.seek(str(datetime.timedelta(seconds=int(position))))
@soco_error()
def clear_playlist(self) -> None:
"""Clear players playlist."""
self.coordinator.soco.clear_queue()
@soco_error()
def play_media(self, media_type: str, media_id: str, **kwargs: Any) -> None:
"""
Send the play_media command to the media player.
If media_id is a Plex payload, attempt Plex->Sonos playback.
If media_id is a Sonos or Tidal share link, attempt playback
using the respective service.
If media_type is "playlist", media_id should be a Sonos
Playlist name. Otherwise, media_id should be a URI.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
soco = self.coordinator.soco
if media_id and media_id.startswith(PLEX_URI_SCHEME):
media_id = media_id[len(PLEX_URI_SCHEME) :]
play_on_sonos(self.hass, media_type, media_id, self.name) # type: ignore[no-untyped-call]
return
share_link = self.speaker.share_link
if share_link.is_share_link(media_id):
if kwargs.get(ATTR_MEDIA_ENQUEUE):
share_link.add_share_link_to_queue(media_id)
else:
soco.clear_queue()
share_link.add_share_link_to_queue(media_id)
soco.play_from_queue(0)
elif media_type in (MEDIA_TYPE_MUSIC, MEDIA_TYPE_TRACK):
if kwargs.get(ATTR_MEDIA_ENQUEUE):
soco.add_uri_to_queue(media_id)
else:
soco.play_uri(media_id)
elif media_type == MEDIA_TYPE_PLAYLIST:
if media_id.startswith("S:"):
item = get_media(self.media.library, media_id, media_type) # type: ignore[no-untyped-call]
soco.play_uri(item.get_uri())
return
try:
playlists = soco.get_sonos_playlists()
playlist = next(p for p in playlists if p.title == media_id)
except StopIteration:
_LOGGER.error('Could not find a Sonos playlist named "%s"', media_id)
else:
soco.clear_queue()
soco.add_to_queue(playlist)
soco.play_from_queue(0)
elif media_type in PLAYABLE_MEDIA_TYPES:
item = get_media(self.media.library, media_id, media_type) # type: ignore[no-untyped-call]
if not item:
_LOGGER.error('Could not find "%s" in the library', media_id)
return
soco.play_uri(item.get_uri())
else:
_LOGGER.error('Sonos does not support a media type of "%s"', media_type)
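    # Illustrative service call (hypothetical entity_id and URI; mirrors the docstring
    # above): setting "enqueue" adds the item to the queue instead of replacing it.
    #
    #     await hass.services.async_call(
    #         "media_player",
    #         "play_media",
    #         {
    #             "entity_id": "media_player.living_room",
    #             "media_content_type": MEDIA_TYPE_MUSIC,
    #             "media_content_id": "http://example.com/stream.mp3",
    #             "enqueue": True,
    #         },
    #     )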
@soco_error()
def set_sleep_timer(self, sleep_time: int) -> None:
"""Set the timer on the player."""
self.coordinator.soco.set_sleep_timer(sleep_time)
@soco_error()
def clear_sleep_timer(self) -> None:
"""Clear the timer on the player."""
self.coordinator.soco.set_sleep_timer(None)
@soco_error()
def set_alarm(
self,
alarm_id: int,
time: datetime.datetime | None = None,
volume: float | None = None,
enabled: bool | None = None,
include_linked_zones: bool | None = None,
) -> None:
"""Set the alarm clock on the player."""
alarm = None
for one_alarm in alarms.get_alarms(self.coordinator.soco):
if one_alarm.alarm_id == str(alarm_id):
alarm = one_alarm
if alarm is None:
_LOGGER.warning("Did not find alarm with id %s", alarm_id)
return
if time is not None:
alarm.start_time = time
if volume is not None:
alarm.volume = int(volume * 100)
if enabled is not None:
alarm.enabled = enabled
if include_linked_zones is not None:
alarm.include_linked_zones = include_linked_zones
alarm.save()
@soco_error()
def play_queue(self, queue_position: int = 0) -> None:
"""Start playing the queue."""
self.soco.play_from_queue(queue_position)
@soco_error()
def remove_from_queue(self, queue_position: int = 0) -> None:
"""Remove item from the queue."""
self.coordinator.soco.remove_from_queue(queue_position)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return entity specific state attributes."""
attributes: dict[str, Any] = {
ATTR_SONOS_GROUP: self.speaker.sonos_group_entities
}
if self.media.queue_position is not None:
attributes[ATTR_QUEUE_POSITION] = self.media.queue_position
return attributes
async def async_get_browse_image(
self,
media_content_type: str | None,
media_content_id: str | None,
media_image_id: str | None = None,
) -> tuple[None | str, None | str]:
"""Fetch media browser image to serve via proxy."""
if (
media_content_type in [MEDIA_TYPE_ALBUM, MEDIA_TYPE_ARTIST]
and media_content_id
):
item = await self.hass.async_add_executor_job(
get_media,
self.media.library,
media_content_id,
MEDIA_TYPES_TO_SONOS[media_content_type],
)
if image_url := getattr(item, "album_art_uri", None):
result = await self._async_fetch_image(image_url) # type: ignore[no-untyped-call]
return result # type: ignore
return (None, None)
async def async_browse_media(
self, media_content_type: str | None = None, media_content_id: str | None = None
) -> Any:
"""Implement the websocket media browsing helper."""
is_internal = is_internal_request(self.hass)
def _get_thumbnail_url(
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> str | None:
if is_internal:
item = get_media( # type: ignore[no-untyped-call]
self.media.library,
media_content_id,
media_content_type,
)
return getattr(item, "album_art_uri", None) # type: ignore[no-any-return]
return self.get_browse_image_url(
media_content_type,
urllib.parse.quote_plus(media_content_id),
media_image_id,
)
if media_content_type in [None, "library"]:
return await self.hass.async_add_executor_job(
library_payload, self.media.library, _get_thumbnail_url
)
payload = {
"search_type": media_content_type,
"idstring": media_content_id,
}
response = await self.hass.async_add_executor_job(
build_item_response, self.media.library, payload, _get_thumbnail_url
)
if response is None:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
)
return response
|
|
#!/usr/bin/env python
"Makes working with XML feel like you are working with JSON"
"""
Copyright (C) 2012 Martin Blech and individual contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
OrderedDict = dict
_basestring = str
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.5.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
pass
class _DictSAXHandler(object):
def __init__(self,
item_depth=0,
item_callback=lambda *args: True,
xml_attribs=True,
attr_prefix='@',
cdata_key='#text',
force_cdata=False,
cdata_separator='',
postprocessor=None,
dict_constructor=OrderedDict,
strip_whitespace=True):
self.path = []
self.stack = []
self.data = None
self.item = None
self.item_depth = item_depth
self.xml_attribs = xml_attribs
self.item_callback = item_callback
self.attr_prefix = attr_prefix
self.cdata_key = cdata_key
self.force_cdata = force_cdata
self.cdata_separator = cdata_separator
self.postprocessor = postprocessor
self.dict_constructor = dict_constructor
self.strip_whitespace = strip_whitespace
def startElement(self, name, attrs):
attrs = self.dict_constructor(list(zip(attrs[0::2], attrs[1::2])))
self.path.append((name, attrs or None))
if len(self.path) > self.item_depth:
self.stack.append((self.item, self.data))
if self.xml_attribs:
attrs = self.dict_constructor(
(self.attr_prefix+key, value)
for (key, value) in list(attrs.items()))
else:
attrs = None
self.item = attrs or None
self.data = None
def endElement(self, name):
if len(self.path) == self.item_depth:
item = self.item
if item is None:
item = self.data
should_continue = self.item_callback(self.path, item)
if not should_continue:
raise ParsingInterrupted()
if len(self.stack):
item, data = self.item, self.data
self.item, self.data = self.stack.pop()
if self.strip_whitespace and data is not None:
data = data.strip() or None
if data and self.force_cdata and item is None:
item = self.dict_constructor()
if item is not None:
if data:
self.push_data(item, self.cdata_key, data)
self.item = self.push_data(self.item, name, item)
else:
self.item = self.push_data(self.item, name, data)
else:
self.item = self.data = None
self.path.pop()
def characters(self, data):
if not self.data:
self.data = data
else:
self.data += self.cdata_separator + data
def push_data(self, item, key, data):
if self.postprocessor is not None:
result = self.postprocessor(self.path, key, data)
if result is None:
return item
key, data = result
if item is None:
item = self.dict_constructor()
try:
value = item[key]
if isinstance(value, list):
value.append(data)
else:
item[key] = [value, data]
except KeyError:
item[key] = data
return item
def parse(xml_input, encoding='utf-8', expat=expat, *args, **kwargs):
"""Parse the given XML input and convert it into a dictionary.
`xml_input` can either be a `string` or a file-like object.
If `xml_attribs` is `True`, element attributes are put in the dictionary
among regular child elements, using `@` as a prefix to avoid collisions. If
set to `False`, they are just ignored.
Simple example::
>>> doc = xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>
... \"\"\")
    >>> doc['a']['@prop']
u'x'
>>> doc['a']['b']
[u'1', u'2']
If `item_depth` is `0`, the function returns a dictionary for the root
element (default behavior). Otherwise, it calls `item_callback` every time
an item at the specified depth is found and returns `None` in the end
(streaming mode).
The callback function receives two parameters: the `path` from the document
root to the item (name-attribs pairs), and the `item` (dict). If the
callback's return value is false-ish, parsing will be stopped with the
:class:`ParsingInterrupted` exception.
Streaming example::
>>> def handle(path, item):
... print 'path:%s item:%s' % (path, item)
... return True
...
>>> xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>\"\"\", item_depth=2, item_callback=handle)
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2
The optional argument `postprocessor` is a function that takes `path`, `key`
and `value` as positional arguments and returns a new `(key, value)` pair
where both `key` and `value` may have changed. Usage example::
>>> def postprocessor(path, key, value):
... try:
... return key + ':int', int(value)
... except (ValueError, TypeError):
... return key, value
>>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
... postprocessor=postprocessor)
OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])
You can pass an alternate version of `expat` (such as `defusedexpat`) by
using the `expat` parameter. E.g:
>>> import defusedexpat
>>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
OrderedDict([(u'a', u'hello')])
"""
handler = _DictSAXHandler(*args, **kwargs)
parser = expat.ParserCreate()
parser.ordered_attributes = True
parser.StartElementHandler = handler.startElement
parser.EndElementHandler = handler.endElement
parser.CharacterDataHandler = handler.characters
try:
parser.ParseFile(xml_input)
except (TypeError, AttributeError):
if isinstance(xml_input, _unicode):
xml_input = xml_input.encode(encoding)
parser.Parse(xml_input, True)
return handler.item
def _emit(key, value, content_handler,
attr_prefix='@',
cdata_key='#text',
root=True,
preprocessor=None):
if preprocessor is not None:
result = preprocessor(key, value)
if result is None:
return
key, value = result
if not isinstance(value, (list, tuple)):
value = [value]
if root and len(value) > 1:
raise ValueError('document with multiple roots')
for v in value:
if v is None:
v = OrderedDict()
elif not isinstance(v, dict):
v = _unicode(v)
if isinstance(v, _basestring):
v = OrderedDict(((cdata_key, v),))
cdata = None
attrs = OrderedDict()
children = []
for ik, iv in list(v.items()):
if ik == cdata_key:
cdata = iv
continue
if ik.startswith(attr_prefix):
attrs[ik[len(attr_prefix):]] = iv
continue
children.append((ik, iv))
content_handler.startElement(key, AttributesImpl(attrs))
for child_key, child_value in children:
_emit(child_key, child_value, content_handler,
attr_prefix, cdata_key, False, preprocessor)
if cdata is not None:
content_handler.characters(cdata)
content_handler.endElement(key)
def unparse(item, output=None, encoding='utf-8', **kwargs):
((key, value),) = list(item.items())
must_return = False
if output is None:
output = StringIO()
must_return = True
content_handler = XMLGenerator(output, encoding)
content_handler.startDocument()
_emit(key, value, content_handler, **kwargs)
content_handler.endDocument()
if must_return:
value = output.getvalue()
try: # pragma no cover
value = value.decode(encoding)
except AttributeError: # pragma no cover
pass
return value
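# Round-trip sketch (not part of the original module): `parse` builds an OrderedDict from
# XML, and `unparse` emits XML from a single-rooted dict, returning the string when no
# `output` stream is given.
#
#     doc = parse('<a prop="x"><b>1</b><b>2</b></a>')
#     assert doc['a']['@prop'] == 'x' and doc['a']['b'] == ['1', '2']
#     xml = unparse({'a': {'@prop': 'x', 'b': ['1', '2']}})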
if __name__ == '__main__': # pragma: no cover
import sys
import marshal
(item_depth,) = sys.argv[1:]
item_depth = int(item_depth)
def handle_item(path, item):
marshal.dump((path, item), sys.stdout)
return True
try:
root = parse(sys.stdin,
item_depth=item_depth,
item_callback=handle_item,
dict_constructor=dict)
if item_depth == 0:
handle_item([], root)
except KeyboardInterrupt:
pass
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple script to make building/testing Mojo components easier."""
import argparse
from copy import deepcopy
import logging
import os
import subprocess
import sys
from get_test_list import GetTestList
from mopy.config import Config
from mopy.paths import Paths
from mopy.gn import GNArgsForConfig, ParseGNConfig, CommandLineForGNArgs
from mopy.log import InitLogging
_logger = logging.getLogger()
_verbose_count = 0
def _args_to_config(args):
# Default to host OS.
target_os = None
if args.android:
target_os = Config.OS_ANDROID
target_cpu = args.target_cpu
additional_args = {}
if 'clang' in args:
additional_args['is_clang'] = args.clang
if 'asan' in args and args.asan:
additional_args['sanitizer'] = Config.SANITIZER_ASAN
# Additional non-standard config entries:
if 'goma' in args:
goma_dir = os.environ.get('GOMA_DIR')
goma_home_dir = os.path.join(os.getenv('HOME', ''), 'goma')
if args.goma and goma_dir:
additional_args['use_goma'] = True
additional_args['goma_dir'] = goma_dir
elif args.goma and os.path.exists(goma_home_dir):
additional_args['use_goma'] = True
additional_args['goma_dir'] = goma_home_dir
else:
additional_args['use_goma'] = False
additional_args['goma_dir'] = None
if 'nacl' in args:
additional_args['use_nacl'] = args.nacl
if not ('asan' in args and args.asan):
go_dir = os.path.join(Paths().src_root, 'third_party', 'go', 'tool')
if args.android:
additional_args['mojo_use_go'] = True
additional_args['go_build_tool'] = os.path.join(
go_dir, 'android_arm', 'bin', 'go')
elif target_os is None and Config.GetHostOS() == Config.OS_LINUX:
additional_args['mojo_use_go'] = True
additional_args['go_build_tool'] = os.path.join(
go_dir, 'linux_amd64', 'bin', 'go')
if 'dry_run' in args:
additional_args['dry_run'] = args.dry_run
if 'builder_name' in args:
additional_args['builder_name'] = args.builder_name
if 'build_number' in args:
additional_args['build_number'] = args.build_number
if 'master_name' in args:
additional_args['master_name'] = args.master_name
if 'test_results_server' in args:
additional_args['test_results_server'] = args.test_results_server
if 'gn_args' in args:
additional_args['gn_args'] = args.gn_args
return Config(target_os=target_os, target_cpu=target_cpu,
is_debug=args.debug, dcheck_always_on=args.dcheck_always_on,
**additional_args)
def _get_out_dir(config):
"""Gets the build output directory (e.g., out/Debug), relative to src, for the
given config."""
paths = Paths(config)
return paths.SrcRelPath(paths.build_dir)
def _sync(config): # pylint: disable=W0613
"""Runs gclient sync for the given config."""
_logger.debug('_sync()')
return subprocess.call(['gclient', 'sync'])
def _gn(config):
"""Runs gn gen for the given config."""
_logger.debug('_gn()')
command = ['gn', 'gen', '--check']
gn_args = CommandLineForGNArgs(GNArgsForConfig(config))
out_dir = _get_out_dir(config)
command.append(out_dir)
command.append('--args=%s' % ' '.join(gn_args))
print 'Running %s %s ...' % (command[0],
' '.join('\'%s\'' % x for x in command[1:]))
return subprocess.call(command)
def _build(config):
"""Builds for the given config."""
_logger.debug('_build()')
out_dir = _get_out_dir(config)
gn_args = ParseGNConfig(out_dir)
print 'Building in %s ...' % out_dir
if gn_args.get('use_goma'):
# Use the configured goma directory.
local_goma_dir = gn_args.get('goma_dir')
print 'Ensuring goma (in %s) started ...' % local_goma_dir
command = ['python',
os.path.join(local_goma_dir, 'goma_ctl.py'),
'ensure_start']
exit_code = subprocess.call(command)
if exit_code:
return exit_code
return subprocess.call(['ninja', '-j', '1000', '-l', '100', '-C', out_dir])
else:
return subprocess.call(['ninja', '-C', out_dir])
def _run_tests(config, test_types):
"""Runs the tests of the given type(s) for the given config."""
assert isinstance(test_types, list)
config = deepcopy(config)
config.values['test_types'] = test_types
test_list = GetTestList(config, verbose_count=_verbose_count)
dry_run = config.values.get('dry_run')
final_exit_code = 0
failure_list = []
for entry in test_list:
print 'Running: %s' % entry['name']
print 'Command: %s' % ' '.join(entry['command'])
if dry_run:
continue
_logger.info('Starting: %s' % ' '.join(entry['command']))
exit_code = subprocess.call(entry['command'])
_logger.info('Completed: %s' % ' '.join(entry['command']))
if exit_code:
if not final_exit_code:
final_exit_code = exit_code
failure_list.append(entry['name'])
print 72 * '='
print 'SUMMARY:',
if dry_run:
print 'Dry run: no tests run'
elif not failure_list:
assert not final_exit_code
print 'All tests passed'
else:
assert final_exit_code
print 'The following had failures:', ', '.join(failure_list)
return final_exit_code
def _test(config):
_logger.debug('_test()')
return _run_tests(config, [Config.TEST_TYPE_DEFAULT])
def _perftest(config):
_logger.debug('_perftest()')
return _run_tests(config, [Config.TEST_TYPE_PERF])
def _pytest(config):
_logger.debug('_pytest()')
return _run_tests(config, ['python'])
def main():
os.chdir(Paths().src_root)
parser = argparse.ArgumentParser(description='A script to make building'
'/testing Mojo components easier.')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('--verbose',
help='Be verbose (multiple times for more)',
default=0, dest='verbose_count', action='count')
parent_parser.add_argument('--asan', help='Use Address Sanitizer',
action='store_true')
parent_parser.add_argument('--dcheck_always_on',
help='DCHECK and MOJO_DCHECK are fatal even in '
'release builds',
action='store_true')
debug_group = parent_parser.add_mutually_exclusive_group()
debug_group.add_argument('--debug', help='Debug build (default)',
default=True, action='store_true')
debug_group.add_argument('--release', help='Release build', default=False,
dest='debug', action='store_false')
os_group = parent_parser.add_mutually_exclusive_group()
os_group.add_argument('--android', help='Build for Android',
action='store_true')
parent_parser.add_argument('--target-cpu',
help='CPU architecture to build for.',
choices=['x64', 'x86', 'arm'])
subparsers = parser.add_subparsers()
sync_parser = subparsers.add_parser('sync', parents=[parent_parser],
help='Sync using gclient (does not run gn).')
sync_parser.set_defaults(func=_sync)
gn_parser = subparsers.add_parser('gn', parents=[parent_parser],
help='Run gn for mojo (does not sync).')
gn_parser.set_defaults(func=_gn)
gn_parser.add_argument('--args', help='Specify extra args',
default=None, dest='gn_args')
# Note: no default, if nothing is specified on the command line GN decides.
gn_parser.add_argument('--nacl', help='Add in NaCl', action='store_true',
default=argparse.SUPPRESS)
gn_parser.add_argument('--no-nacl', help='Remove NaCl', action='store_false',
default=argparse.SUPPRESS, dest='nacl')
clang_group = gn_parser.add_mutually_exclusive_group()
clang_group.add_argument('--clang', help='Use Clang (default)', default=None,
action='store_true')
clang_group.add_argument('--gcc', help='Use GCC',
dest='clang', action='store_false')
goma_group = gn_parser.add_mutually_exclusive_group()
goma_group.add_argument('--goma',
help='Use Goma (if $GOMA_DIR is set or $HOME/goma '
'exists; default)',
default=True,
action='store_true')
goma_group.add_argument('--no-goma', help='Don\'t use Goma', default=False,
dest='goma', action='store_false')
build_parser = subparsers.add_parser('build', parents=[parent_parser],
help='Build')
build_parser.set_defaults(func=_build)
test_parser = subparsers.add_parser('test', parents=[parent_parser],
help='Run unit tests (does not build).')
test_parser.set_defaults(func=_test)
test_parser.add_argument('--dry-run',
help='Print instead of executing commands',
default=False, action='store_true')
perftest_parser = subparsers.add_parser('perftest', parents=[parent_parser],
help='Run perf tests (does not build).')
perftest_parser.set_defaults(func=_perftest)
pytest_parser = subparsers.add_parser('pytest', parents=[parent_parser],
help='Run Python unit tests (does not build).')
pytest_parser.set_defaults(func=_pytest)
args = parser.parse_args()
global _verbose_count
_verbose_count = args.verbose_count
InitLogging(_verbose_count)
return args.func(_args_to_config(args))
if __name__ == '__main__':
sys.exit(main())
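# Typical invocations (illustrative; assumes this file is saved as mojob.py and run from a
# Mojo checkout, since the script chdirs to the source root itself):
#
#   ./mojob.py gn --release
#   ./mojob.py build --release
#   ./mojob.py test --release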
|
|
__doc__ = """
Neural network library, drawing inspiration from Torch's nn and nngraph
"""
import cgt
from cgt import core, size
import numpy as np
from .nn_ops.im2col import im2col
from .nn_ops.max_pool_2d import max_pool_2d #pylint: disable=W0611
from .nn_ops.cross_channel_lrn import cross_channel_lrn #pylint: disable=W0611
from .nn_ops import cudnn_ops #pylint: disable=W0611
from collections import namedtuple
class Module(object):
def __init__(self, inputs, outputs):
self.c = core.Composition(inputs, outputs)
def __call__(self, inputs):
return self.c.expand(inputs)
# assert all(isinstance(x,core.Node) for x in inputs)
# tup_out = core.Result(self.c, inputs)
# return [core.Result(core.TupleIndex(i),[tup_out]) for i in xrange(self.c.n_out)]
def get_parameters(self):
return list(node for node in self.c.get_nodes() if node.is_data())
def expand(self, inputs):
return self.c.expand(inputs)
def is_parameter(node):
return node.is_data() and node.props["is_parameter"]
def get_parameters(loss):
return list(node for node in cgt.core.topsorted([loss]) if is_parameter(node))
def parameter(val, name=None, device=None):
fixed_shape_mask = "all"
out = cgt.shared(val, name=name, device=device, fixed_shape_mask=fixed_shape_mask)
out.props["is_parameter"] = True
return out
# ================================================================
# Math functions
# ================================================================
def rectify(x):
return x * (x >= 0)
def _nu_softplus(x,out):
absx = np.abs(x)
out[:] = (absx+x)/2 + np.log(1 + np.exp(-absx))
def softplus(x):
op = core.ElwiseUnary("softplus",core.UnaryInfo("SoftPlus", _nu_softplus, True, 'f', lambda x, g, gy: gy/(cgt.exp(-x)+1.0), "(x > 0) ? (x + log(exp(-x) + 1)) : log(1+exp(x))"))
return core.Result(op, [x])
def softmax(x,axis=1):
# x = cgt.broadcast("-", x, x.max(axis=1,keepdims=True),"xx,x1")
out = cgt.exp(x)
out = cgt.broadcast("/", out, out.sum(axis=axis,keepdims=True), "xx,x1")
return out
def logsoftmax(x, axis=1):
return cgt.log(softmax(x, axis=axis))
def zero_one_loss(x, y):
assert x.ndim == 2 and y.ndim in (1,2) and core.dtype_kind(y.dtype)=='i'
return cgt.equal(x.argmax(axis=1,keepdims=False),y.flatten())
def dropout(x, p=0):
if p==0:
return x
else:
mask = cgt.greater(cgt.rand(*cgt.shape(x)), p)
x = x * mask
x = x /(1.0-p)
return x
def conv2d_fft(x_BKRC, f_LKrc, subsample, pad):
# TODO add shape assertion
f_LKrc = cgt.flip(f_LKrc, [2,3])
padnrows = size(x_BKRC, 2) + size(f_LKrc, 2) - 1
padncols = size(x_BKRC, 3) + size(f_LKrc, 3) - 1
tx = cgt.rfft(x_BKRC, (padnrows,padncols), (2,3))
tf = cgt.rfft(f_LKrc, (padnrows,padncols), (2,3))
out = cgt.irfft( cgt.einsum("BKrc,LKrc->BLrc",tx, tf), (2,3))
out = out[:,:,pad[0]:(padnrows-pad[0]):subsample[0],pad[1]:(padncols-pad[1]):subsample[1]] #pylint: disable=E1127
return out
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0,0), stride=(1,1)):
col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
L,K,r,c = f_LKrc.shape
f_LZ = f_LKrc.reshape([L, K*r*c])
B,m,n,Z = col_BmnZ.shape
col_Bmn_Z = col_BmnZ.reshape([B*m*n, Z])
col_Bmn_L = core.Result(core.Mul22(False,True), [col_Bmn_Z, f_LZ])
return col_Bmn_L.reshape([B,m,n,L]).transpose([0,3,1,2])
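# Shape bookkeeping for conv2d above (descriptive note, not original source): the suffix
# letters name axes, e.g. x_BKRC is (batch, in_channels, rows, cols) and f_LKrc is
# (out_channels, in_channels, kernel_rows, kernel_cols). im2col unrolls every receptive
# field into a row of length Z = K*r*c, Mul22(False, True) multiplies by the transposed
# filter matrix f_LZ, and the final reshape/transpose restores (B, L, m, n).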
# ================================================================
# Initializations
# ================================================================
IIDGaussian = namedtuple("IIDGaussian", ["mean", "std"])
IIDGaussian.__new__.__defaults__ = (0, 1)
IIDUniform = namedtuple("IIDUniform", ["low", "high"])
Constant = namedtuple("Constant", ["constant"])
XavierNormal = namedtuple("XavierNormal", ["scale"])
XavierUniform = namedtuple("XavierUniform", ["scale"])
XavierNormal.__new__.__defaults__ = (1,)
XavierUniform.__new__.__defaults__ = (1,)
HeNormal = namedtuple("HeNormal", ["scale"])
HeUniform = namedtuple("HeUniform", ['scale'])
def init_array(init, shape):
if isinstance(init, IIDGaussian):
return (np.random.randn(*shape)*init.std + init.mean).astype(cgt.floatX)
elif isinstance(init, IIDUniform):
return (np.random.rand(*shape)*(init.high-init.low) + init.low).astype(cgt.floatX)
elif isinstance(init, Constant):
return init.constant*np.ones(shape, cgt.floatX)
elif isinstance(init, XavierNormal):
std = get_xavier_weight(init, shape)
return (np.random.randn(*shape)*std).astype(cgt.floatX)
elif isinstance(init, XavierUniform):
std = get_xavier_weight(init, shape)
        low = -np.sqrt(3) * std
        high = np.sqrt(3) * std
return (np.random.rand(*shape)*(high-low) + low).astype(cgt.floatX)
elif isinstance(init, HeNormal):
std = get_he_weight(init, shape)
return (np.random.randn(*shape)*std).astype(cgt.floatX)
elif isinstance(init, HeUniform):
std = get_he_weight(init, shape)
low = -np.sqrt(3) * std
high = np.sqrt(3) * std
return (np.random.rand(*shape)*(high-low) + low).astype(cgt.floatX)
else:
raise ValueError("Invalid initializer %s"%init)
def get_xavier_weight(init, shape):
"""For relu activation scale (init.scale) should be sqrt(2). For sigmoid and tanh 1.0 should be used.
Math depends on chosen underlying distribution (Normal, Uniform, etc) and activation function.
For uniform with RELU you obtain
    a = sqrt(6 / (fan_in + fan_out)),  W ~ U[-a, a]. See reference for full details.
Reference: Xavier Glorot and Yoshua Bengio (2010):
Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics."""
if len(shape) < 2:
raise RuntimeError("Shape length must be greater than two")
n1, n2 = shape[:2]
field_size = np.prod(shape[2:])
scale = init.scale
std = scale * np.sqrt(2.0 / ((n1 + n2) * field_size))
return std
def get_he_weight(init, shape):
"""Use sqrt(2) for RELU and 1 for sigmoid/linear/tanh for init.scale
Weights are initialized with a standard deviation of
sigma = scale*sqrt{1/fan_{in}}
Reference: Kaiming He et al. (2015):
Delving deep into rectifiers: Surpassing human-level performance on
imagenet classification. arXiv preprint arXiv:1502.01852."""
if len(shape) == 2:
fan_in = shape[0]
elif len(shape) > 2:
fan_in = np.prod(shape[1:])
else:
raise RuntimeError("This initializer does not work with shapes of length less than two")
std = init.scale * np.sqrt(1.0 / fan_in)
return std
# ================================================================
# Layer constructors
# ================================================================
class Affine(object):
"""
Like torch's nn.Linear
"""
def __init__(self, input_size, output_size, name=None, weight_init=Constant(0), bias_init=Constant(0)):
input_size = int(input_size)
output_size = int(output_size)
name = "unnamed" if name is None else name
self.weight = parameter(init_array(weight_init, (input_size, output_size)),
name=name+".W")
self.bias = parameter(init_array(bias_init, (1, output_size)),
name=name+".b")
def __call__(self, x):
return cgt.broadcast("+", x.dot(self.weight), self.bias, "xx,1x")
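# Minimal usage sketch (hypothetical layer sizes; not part of the original library):
# stacking Affine with rectify and softmax gives a small MLP over symbolic inputs.
#
#     X = cgt.matrix("X")
#     h = rectify(Affine(784, 256, name="fc1", weight_init=XavierUniform())(X))
#     probs = softmax(Affine(256, 10, name="fc2", weight_init=XavierUniform())(h))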
class SpatialConvolution(object):
def __init__(self, input_channels, output_channels, kernelshape, pad, stride=(1,1), name=None, weight_init=Constant(0), bias_init=Constant(0)):
# type conversion
input_channels = int(input_channels)
output_channels = int(output_channels)
self.kernelshape = tuple(map(int, kernelshape))
self.pad = tuple(map(int,pad))
self.stride = tuple(map(int,stride))
name = "unnamed" if name is None else name
self.weight = parameter(init_array(weight_init, (output_channels, input_channels) + self.kernelshape),
name=name+".W")
self.bias = parameter(init_array(bias_init, (1, output_channels, 1, 1)),
name=name+".b")
def __call__(self, x):
tmp = conv2d(x, self.weight, self.kernelshape, self.pad, self.stride)
return cgt.broadcast("+", tmp, self.bias, "xxxx,1x11")
# ================================================================
# Optimization
# ================================================================
def sgd(cost, params, learning_rate):
"""Stochastic Gradient Descent (SGD) updates
Math:
* ``param := param - learning_rate * gradient``
Parameters
----------
cost : a scalar loss.
params : a list of cgt shared variables. We generate update
expressions w.r.t. these variables.
learning_rate : float
Tunes the size of the update step.
Returns
-------
list of tuples of the form (param, updates)
"""
updates = []
grads = cgt.grad(cost, params)
for param, grad in zip(params, grads):
updates.append((param, param - learning_rate * grad))
return updates
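# Usage sketch (assumes `loss`, `X`, `y` are cgt symbolic expressions defined elsewhere):
# the (param, new_value) pairs returned by these optimizers are passed to cgt.function as
# its `updates` argument, so each call of the compiled function applies one update step.
#
#     params = get_parameters(loss)
#     train_step = cgt.function([X, y], loss, updates=sgd(loss, params, learning_rate=1e-2))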
def momentum(cost, params, learning_rate, momentum=0.9):
"""Stochastic Gradient Descent (SGD) updates with momentum
Math:
* ``velocity := momentum * velocity - learning_rate * grad``
* ``param := param + velocity``
Parameters
----------
cost : a scalar loss.
params : a list of cgt shared variables. We generate update
expressions w.r.t. these variables.
learning_rate : float
Tunes the size of the update step.
momentum: float
Tunes the weight given to the velocity term.
Returns
-------
    list of tuples of the form [(param, update), (velocity, velocity_update)]
"""
updates = []
grads = cgt.grad(cost, params)
for param, grad in zip(params, grads):
value = param.op.get_value()
velocity = cgt.shared(np.zeros(value.shape, dtype=value.dtype))
x = momentum * velocity + param - learning_rate * grad
updates.append((velocity, x-param))
updates.append((param, x))
return updates
def nesterov_momentum(cost, params, learning_rate, momentum=0.9):
"""Stochastic Gradient Descent (SGD) updates with Nesterov momentum
Math:
* ``velocity := momentum * velocity - learning_rate * grad``
* ``param := momentum*velocity + param - learning_rate * grad``
Parameters
----------
cost : a scalar loss.
params : a list of cgt shared variables. We generate update
expressions w.r.t. these variables.
learning_rate : float
Tunes the size of the update step.
momentum: float
Tunes the weight given to the velocity term.
Returns
-------
    list of tuples of the form [(param, update), (velocity, velocity_update)]
"""
updates = []
grads = cgt.grad(cost, params)
for param, grad in zip(params, grads):
value = param.op.get_value()
velocity = cgt.shared(np.zeros(value.shape, dtype=value.dtype))
x = momentum * velocity - learning_rate * grad
updates.append((velocity, x))
updates.append((param, momentum*x + param - learning_rate * grad))
return updates
def adagrad(cost, params, learning_rate=1.0, epsilon=1e-6):
"""Adagrad updates
    The learning rate will be scaled by dividing it by the square root of the sum of accumulated squared gradients.
Math:
* ``accu_new = accu + grad ** 2``
* ``param = param - (learning_rate * grad) / cgt.sqrt(accu_new + epsilon)``
Parameters
----------
cost : a scalar loss.
params : a list of cgt shared variables. We generate update
expressions w.r.t. these variables.
learning_rate : float
Tunes the size of the update step.
epsilon: avoids division close to zero. Small float.
Returns
-------
list of tuples of the form [(param, updates), (accumulated_grads, accumulated_grads_new)]
References
----------
.. [1] Duchi, J., Hazan, E., & Singer, Y. (2011):
Adaptive subgradient methods for online learning and stochastic
optimization. JMLR, 12:2121-2159.
"""
updates = []
grads = cgt.grad(cost, params)
for param, grad in zip(params, grads):
value = param.op.get_value()
accu = cgt.shared(np.zeros(value.shape, dtype=value.dtype))
accu_new = accu + grad ** 2
updates.append((accu, accu_new))
updates.append((param, param - (learning_rate * grad) / cgt.sqrt(accu_new + epsilon)))
return updates
def rmsprop(cost, params, learning_rate=1.0, rho=0.9, epsilon=1e-6):
"""RMSProp updates
    Divide the learning rate by the square root of a moving average of squared gradients (an RMS estimate). See [1]
Math:
* ``accu_new = rho * accu + (1 - rho) * grad ** 2``
* ``param = param - (learning_rate * grad / cgt.sqrt(accu_new + epsilon))``
Parameters
----------
cost : a scalar loss.
params : a list of cgt shared variables. We generate update
expressions w.r.t. these variables.
learning_rate : float
Tunes the size of the update step.
rho : float
Controls decay of gradient moving average.
epsilon : float
Avoid division by 0 while scaling. Small constant.
Returns
-------
list of tuples of the form [(param, updates), (accumulated_RMS_grads, accumulated_RMS_grads_new)]
References
----------
.. [1] Yann N. Dauphin, Harm de Vries, Junyoung Chung, Yoshua Bengio (2015):
RMSProp and equilibrated adaptive learning rates for non-convex optimization
arXiv:1502.04390 http://arxiv.org/abs/1502.04390
"""
updates = []
grads = cgt.grad(cost, params)
for param, grad in zip(params, grads):
value = param.op.get_value()
accu = cgt.shared(np.zeros(value.shape, dtype=value.dtype))
accu_new = rho * accu + (1 - rho) * grad ** 2
updates.append((accu, accu_new))
updates.append((param, param - (learning_rate * grad / cgt.sqrt(accu_new + epsilon))))
return updates
def adadelta(cost, params, learning_rate=1.0, rho=0.95, epsilon=1e-6):
""" Adadelta updates
    The learning rate is scaled by the ratio of the accumulated RMS of previous update steps to the accumulated RMS of gradients.
Math:
* ``accu_new = rho * accu + (1 - rho) * grad ** 2``
* ``update = (grad * cgt.sqrt(delta_accu + epsilon) / cgt.sqrt(accu_new + epsilon))``
* ``param = param - learning_rate * update``
* ``delta_accu_new = rho * delta_accu + (1 - rho) * update ** 2``
Parameters
----------
cost : a scalar loss.
params : a list of cgt shared variables. We generate update
expressions w.r.t. these variables.
learning_rate : float
Tunes the size of the update step.
rho : float
Controls decay of gradient moving average.
epsilon : float
Avoid division by 0 while scaling. Small constant.
Returns
-------
list of tuples of the form
[(param, updates), (accumulated_grads, accumulated_grads_new), (step_accum, step_accum_new)]
References
----------
.. [1] Zeiler, M. D. (2012):
ADADELTA: An Adaptive Learning Rate Method.
arXiv Preprint arXiv:1212.5701.
"""
updates = []
grads = cgt.grad(cost, params)
for param, grad in zip(params, grads):
value = param.op.get_value()
accu = cgt.shared(np.zeros(value.shape, dtype=value.dtype))
delta_accu = cgt.shared(np.zeros(value.shape, dtype=value.dtype))
accu_new = rho * accu + (1 - rho) * grad ** 2
updates.append((accu, accu_new))
update = (grad * cgt.sqrt(delta_accu + epsilon) / cgt.sqrt(accu_new + epsilon))
updates.append((param, param - learning_rate * update))
delta_accu_new = rho * delta_accu + (1 - rho) * update ** 2
updates.append((delta_accu, delta_accu_new))
return updates
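# A pure-NumPy sketch of a single adadelta step (assumption: plain arrays stand
# in for the cgt shared variables), mirroring the expressions built above:
#
#     import numpy as np
#     rho, eps, lr = 0.95, 1e-6, 1.0
#     param, grad = np.ones(3), np.full(3, 0.5)
#     accu, delta_accu = np.zeros(3), np.zeros(3)
#     accu_new = rho * accu + (1 - rho) * grad ** 2
#     update = grad * np.sqrt(delta_accu + eps) / np.sqrt(accu_new + eps)
#     param_new = param - lr * update
#     delta_accu_new = rho * delta_accu + (1 - rho) * update ** 2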
|
|
"""
======================================================
Cross validation on the data using multiple processes.
======================================================
"""
from svmutil import *
import commands
import numpy
import random
import multiprocessing
import sys
class result():
# internal class passed to by sub process
def __init__(self, i, n):
self.fp_ind = i # fingerprint index
self.pred_fp_ = numpy.zeros(n) # predicted fingerprints
self.acc = 0 # cross validation accuracy
def internalCV_mp(kernel, labels, n_folds, select_c=False, n_p=8, prob=False):
"""
Internel cross validation using train data.
Parameters:
-----------
kernel, numpy 2d array, n_train*n_train, training kernel
labels, numpy 2d array, n_train*n_fingerprints, training labels
n_folds, number of cross validations
pred_f, string, file to store predicted fingerprints in the CV
select_c, bool, whether doing C selection in CV
n_p, int, number of processes to use
prob, boolean, probability output if prob=True.
Returns:
--------
pred_fp: numpy 2d array, cross validation predictions or probability of
positive label if prob=True.
Note:
-----
Wrtie the cross validation predition fingerprints in pred_f
"""
(n_x, n_x) = kernel.shape
(n_x, n_y) = labels.shape
x = kernel
#cv_accs = numpy.zeros(n_y)
pred_fp = numpy.zeros((n_x, n_y))
tags = _label_folds(n_x, n_folds)
result_queue = multiprocessing.Queue(n_y)
if n_y < n_p:
print "Only %d fingerprints are used" % n_y
print "Change n_p to %d" % n_y
n_p = n_y
task_dict = {}
for i in range(n_y):
task_dict[i%n_p] = []
for i in range(n_y):
task_dict[i%n_p].append(i)
jobs = []
for i in range(n_p):
if select_c:
p = multiprocessing.Process(target=_CV_BestC,
args=(result_queue, x, labels,
task_dict[i], tags, n_folds,
prob,))
jobs.append(p)
p.start()
else:
p = multiprocessing.Process(target=_CV,
args=(result_queue, x, labels,
task_dict[i], tags, n_folds,
prob,))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for i in range(n_y):
res = result_queue.get()
fp_ind = res.fp_ind
pred_fp[:,fp_ind] = res.pred_fp_
#cv_accs[fp_ind] = res.acc
return pred_fp
#w = open(cv_acc_f,"w")
#w.write(" ".join(map(str,cv_accs)))
#w.close()
#numpy.savetxt(pred_f, pred_fp, fmt="%d")
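# A minimal driver sketch (assumptions: the LIBSVM python bindings imported
# above as svmutil are installed, and a precomputed linear kernel is fine for
# the illustration; feature and label values are made up):
#
#     feats = numpy.random.rand(60, 10)
#     kernel = feats.dot(feats.T)                          # n_train * n_train
#     labels = numpy.where(numpy.random.randn(60, 3) > 0, 1, -1)
#     pred_fp = internalCV_mp(kernel, labels, n_folds=5, n_p=2)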
def _CV(Queue, x, labels, inds, tags, n_folds, pb):
"""
    Internal cross validation using C = 1
"""
for ind in inds:
print "cv on %d'th fingerprint" % ind
n = len(x)
pred = numpy.zeros(n)
y = labels[:,ind]
res = result(ind,n)
for i in range(1,n_folds+1):
test = tags == i
train = ~(tags == i)
test = numpy.array(range(n))[test].tolist()
train = numpy.array(range(n))[train].tolist()
train_km = x[numpy.ix_(train,train)]
test_km = x[numpy.ix_(test,train)]
train_label = y[train]
test_label = y[test]
n_train = len(train_km)
n_test = len(test_km)
train_km = numpy.append(numpy.array(range(1,n_train+1)).reshape(n_train,1), train_km,1).tolist()
test_km = numpy.append(numpy.array(range(1,n_test+1)).reshape(n_test,1), test_km,1).tolist()
prob = svm_problem(train_label, train_km, isKernel=True)
if pb:
param = svm_parameter('-t 4 -c 1 -b 1 -q')
m = svm_train(prob,param)
p_label, p_acc, p_val=svm_predict(test_label,test_km, m,'-b 1 -q')
pred[numpy.ix_(test)] = [p[0] for p in p_val]
acc = sum(p_label == y) / float(n)
else:
param = svm_parameter('-t 4 -c 1 -b 0 -q')
m = svm_train(prob,param)
p_label, p_acc, p_val=svm_predict(test_label,test_km, m,'-b 0 -q')
pred[numpy.ix_(test)] = p_label
acc = sum(pred == y) / float(n)
res.ind = ind
res.pred_fp_ = pred
res.acc = acc
Queue.put(res)
def _CV_BestC(Queue, kernel, labels, inds, tags, n_folds, pb):
"""
    Internal cross validation using the best C
"""
for ind in inds:
print "cv on %d'th fingerprint" % ind
n = len(kernel)
y = labels[:,ind]
pred_label = numpy.zeros(n)
res = result(ind,n)
for i in range(1,n_folds+1):
# divide data
validate = numpy.array(tags== i)
            test = numpy.array(tags == (i+1 if i+1 <= n_folds else 1))
train = numpy.array(~numpy.logical_xor(test, validate))
validate_km = kernel[numpy.ix_(validate, train)]
test_km = kernel[numpy.ix_(test, train)]
train_km = kernel[numpy.ix_(train, train)]
n_validate = len(validate_km)
n_train = len(train_km)
n_test = len(test_km)
validate_km = numpy.append(numpy.array(range(1,n_validate+1)).reshape(
n_validate,1), validate_km,1).tolist()
train_km = numpy.append(numpy.array(range(1,n_train+1)).reshape(
n_train,1), train_km,1).tolist()
test_km = numpy.append(numpy.array(range(1,n_test+1)).reshape(
n_test,1), test_km,1).tolist()
validate_y = y[validate]
test_y = y[test]
train_y = y[train]
# select C on validation set with best acc
best_acc = 0
best_c = 2**-5
for C in [2**-5,2**-4,2**-3,2**-2,2**-1,2**0,2**1,2**2,2**3,2**4,
2**5, 2**6,2**7,2**8,2**9,2**10]:
prob = svm_problem(train_y, train_km, isKernel=True)
if pb:
param = svm_parameter('-t 4 -c %f -b 1 -q' % C)
m = svm_train(prob, param)
p_label, p_acc, p_val = svm_predict(validate_y, validate_km, m, '-b 1 -q')
else:
param = svm_parameter('-t 4 -c %f -b 0 -q' % C)
m = svm_train(prob, param)
p_label, p_acc, p_val = svm_predict(validate_y, validate_km, m, '-b 0 -q')
acc = p_acc[0]
if acc > best_acc:
best_c = C
best_acc = acc
# prediction on test set with best C
# merge training set and validation set
train_all = numpy.array(~test)
test_km = kernel[numpy.ix_(test, train_all)]
train_km = kernel[numpy.ix_(train_all, train_all)]
test_y = y[test]
train_y = y[train_all]
n_train = len(train_km)
n_test = len(test_km)
train_km = numpy.append(numpy.array(range(1,n_train+1)).reshape(
n_train,1), train_km,1).tolist()
test_km = numpy.append(numpy.array(range(1,n_test+1)).reshape(
n_test,1), test_km,1).tolist()
prob = svm_problem(train_y, train_km, isKernel=True)
if pb:
param = svm_parameter('-t 4 -c %f -b 1 -q' % best_c)
m = svm_train(prob, param)
p_label,p_acc,p_val = svm_predict(test_y, test_km, m,'-b 1 -q')
pred_label[test] = [p[0] for p in p_val]
acc = numpy.sum(p_label == numpy.array(y)) / float(n)
else:
                param = svm_parameter('-t 4 -c %f -b 0 -q' % best_c)
m = svm_train(prob, param)
p_label,p_acc,p_val = svm_predict(test_y, test_km, m,'-b 0 -q')
pred_label[test] = p_label
acc = numpy.sum(pred_label == numpy.array(y)) / float(n)
res.ind = ind
res.pred_fp_ = pred_label
res.acc = acc
Queue.put(res)
def _label_folds(n_x ,n):
"""
    Label the data by folds (sequential labeling).
"""
tag = [0]*n_x
for i in range(n_x):
tag[i] = i%n + 1
return numpy.array(tag)
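# A small worked example of the sequential fold labeling above (i%n + 1):
#
#     _label_folds(7, 3)  ->  array([1, 2, 3, 1, 2, 3, 1])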
#def _label_by_mol(spectra,n_cv):
# """
# Lableing the data by molecules. Dividing the folds by kegg_id
# """
# mol_dict = {}
# count = 1
# for s in spectra:
# if s.kegg_id not in mol_dict:
# mol_dict[s.kegg_id] = count
# count = count +1
# n_mol = len(mol_dict)
# a = range(1,n_mol+1)
# random.seed(1986)
# random.shuffle(a)
# count = 0
# for cid,num in mol_dict.items():
# mol_dict[cid] = a[count]
# count = count +1
# mol_ids = []
# for s in spectra:
# mol_ids.append(mol_dict[s.kegg_id])
# tags = []
# n_seg = n_mol/n_cv+1
# for mol_num in mol_ids:
# tags.append(mol_num/n_seg+1)
# return numpy.array(tags)
|
|
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto import mock_ec2
from moto import mock_elbv2
from moto import mock_kms
from moto import mock_rds2
from moto import mock_resourcegroupstaggingapi
from moto import mock_s3
from tests import EXAMPLE_AMI_ID, EXAMPLE_AMI_ID2
@mock_ec2
@mock_resourcegroupstaggingapi
def test_get_resources_ec2():
client = boto3.client("ec2", region_name="eu-central-1")
instances = client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_VALUE1"},
{"Key": "MY_TAG2", "Value": "MY_VALUE2"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}],
},
],
)
instance_id = instances["Instances"][0]["InstanceId"]
image_id = client.create_image(Name="testami", InstanceId=instance_id)["ImageId"]
client.create_tags(Resources=[image_id], Tags=[{"Key": "ami", "Value": "test"}])
rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")
resp = rtapi.get_resources()
# Check we have 1 entry for Instance, 1 Entry for AMI
resp["ResourceTagMappingList"].should.have.length_of(2)
# 1 Entry for AMI
resp = rtapi.get_resources(ResourceTypeFilters=["ec2:image"])
resp["ResourceTagMappingList"].should.have.length_of(1)
resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("image/")
    # As we're iterating the same data, this rules out that the test above was a fluke
resp = rtapi.get_resources(ResourceTypeFilters=["ec2:instance"])
resp["ResourceTagMappingList"].should.have.length_of(1)
resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("instance/")
# Basic test of tag filters
resp = rtapi.get_resources(
TagFilters=[{"Key": "MY_TAG1", "Values": ["MY_VALUE1", "some_other_value"]}]
)
resp["ResourceTagMappingList"].should.have.length_of(1)
resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("instance/")
@mock_ec2
@mock_resourcegroupstaggingapi
def test_get_resources_ec2_vpc():
ec2 = boto3.resource("ec2", region_name="us-west-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
ec2.create_tags(Resources=[vpc.id], Tags=[{"Key": "test", "Value": "test"}])
def assert_response(resp):
results = resp.get("ResourceTagMappingList", [])
results.should.have.length_of(1)
vpc.id.should.be.within(results[0]["ResourceARN"])
rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-west-1")
resp = rtapi.get_resources(ResourceTypeFilters=["ec2"])
assert_response(resp)
resp = rtapi.get_resources(ResourceTypeFilters=["ec2:vpc"])
assert_response(resp)
resp = rtapi.get_resources(TagFilters=[{"Key": "test", "Values": ["test"]}])
assert_response(resp)
@mock_ec2
@mock_resourcegroupstaggingapi
def test_get_tag_keys_ec2():
client = boto3.client("ec2", region_name="eu-central-1")
client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_VALUE1"},
{"Key": "MY_TAG2", "Value": "MY_VALUE2"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}],
},
],
)
rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")
resp = rtapi.get_tag_keys()
resp["TagKeys"].should.contain("MY_TAG1")
resp["TagKeys"].should.contain("MY_TAG2")
resp["TagKeys"].should.contain("MY_TAG3")
    # TODO: test pagination
@mock_ec2
@mock_resourcegroupstaggingapi
def test_get_tag_values_ec2():
client = boto3.client("ec2", region_name="eu-central-1")
client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_VALUE1"},
{"Key": "MY_TAG2", "Value": "MY_VALUE2"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}],
},
],
)
client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_VALUE4"},
{"Key": "MY_TAG2", "Value": "MY_VALUE5"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE6"}],
},
],
)
rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")
resp = rtapi.get_tag_values(Key="MY_TAG1")
resp["TagValues"].should.contain("MY_VALUE1")
resp["TagValues"].should.contain("MY_VALUE4")
@mock_ec2
@mock_elbv2
@mock_kms
@mock_resourcegroupstaggingapi
def test_get_many_resources():
elbv2 = boto3.client("elbv2", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
kms = boto3.client("kms", region_name="us-east-1")
security_group = ec2.create_security_group(
GroupName="a-security-group", Description="First One"
)
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
subnet1 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.192/26", AvailabilityZone="us-east-1a"
)
subnet2 = ec2.create_subnet(
VpcId=vpc.id, CidrBlock="172.28.7.0/26", AvailabilityZone="us-east-1b"
)
elbv2.create_load_balancer(
Name="my-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
Tags=[
{"Key": "key_name", "Value": "a_value"},
{"Key": "key_2", "Value": "val2"},
],
)
elbv2.create_load_balancer(
Name="my-other-lb",
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme="internal",
)
kms.create_key(
KeyUsage="ENCRYPT_DECRYPT",
Tags=[
{"TagKey": "key_name", "TagValue": "a_value"},
{"TagKey": "key_2", "TagValue": "val2"},
],
)
rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-east-1")
resp = rtapi.get_resources(
ResourceTypeFilters=["elasticloadbalancing:loadbalancer"]
)
resp["ResourceTagMappingList"].should.have.length_of(2)
resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("loadbalancer/")
resp = rtapi.get_resources(
ResourceTypeFilters=["elasticloadbalancing:loadbalancer"],
TagFilters=[{"Key": "key_name"}],
)
resp["ResourceTagMappingList"].should.have.length_of(1)
resp["ResourceTagMappingList"][0]["Tags"].should.contain(
{"Key": "key_name", "Value": "a_value"}
)
# TODO test pagination
@mock_ec2
@mock_elbv2
@mock_resourcegroupstaggingapi
def test_get_resources_target_group():
ec2 = boto3.resource("ec2", region_name="eu-central-1")
elbv2 = boto3.client("elbv2", region_name="eu-central-1")
vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
# Create two tagged target groups
for i in range(1, 3):
i_str = str(i)
target_group = elbv2.create_target_group(
Name="test" + i_str,
Protocol="HTTP",
Port=8080,
VpcId=vpc.id,
TargetType="instance",
)["TargetGroups"][0]
elbv2.add_tags(
ResourceArns=[target_group["TargetGroupArn"]],
Tags=[{"Key": "Test", "Value": i_str}],
)
rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")
# Basic test
resp = rtapi.get_resources(ResourceTypeFilters=["elasticloadbalancing:targetgroup"])
resp["ResourceTagMappingList"].should.have.length_of(2)
# Test tag filtering
resp = rtapi.get_resources(
ResourceTypeFilters=["elasticloadbalancing:targetgroup"],
TagFilters=[{"Key": "Test", "Values": ["1"]}],
)
resp["ResourceTagMappingList"].should.have.length_of(1)
resp["ResourceTagMappingList"][0]["Tags"].should.contain(
{"Key": "Test", "Value": "1"}
)
@mock_s3
@mock_resourcegroupstaggingapi
def test_get_resources_s3():
# Tests pagination
s3_client = boto3.client("s3", region_name="eu-central-1")
# Will end up having key1,key2,key3,key4
response_keys = set()
# Create 4 buckets
for i in range(1, 5):
i_str = str(i)
s3_client.create_bucket(
Bucket="test_bucket" + i_str,
CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
)
s3_client.put_bucket_tagging(
Bucket="test_bucket" + i_str,
Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]},
)
response_keys.add("key" + i_str)
rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")
resp = rtapi.get_resources(ResourcesPerPage=2)
for resource in resp["ResourceTagMappingList"]:
response_keys.remove(resource["Tags"][0]["Key"])
response_keys.should.have.length_of(2)
resp = rtapi.get_resources(
ResourcesPerPage=2, PaginationToken=resp["PaginationToken"]
)
for resource in resp["ResourceTagMappingList"]:
response_keys.remove(resource["Tags"][0]["Key"])
response_keys.should.have.length_of(0)
@mock_ec2
@mock_resourcegroupstaggingapi
def test_multiple_tag_filters():
client = boto3.client("ec2", region_name="eu-central-1")
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_UNIQUE_VALUE"},
{"Key": "MY_TAG2", "Value": "MY_SHARED_VALUE"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}],
},
],
)
instance_1_id = resp["Instances"][0]["InstanceId"]
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID2,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_ALT_UNIQUE_VALUE"},
{"Key": "MY_TAG2", "Value": "MY_SHARED_VALUE"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_ALT_TAG3", "Value": "MY_VALUE3"}],
},
],
)
instance_2_id = resp["Instances"][0]["InstanceId"]
rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")
results = rtapi.get_resources(
TagFilters=[
{"Key": "MY_TAG1", "Values": ["MY_UNIQUE_VALUE"]},
{"Key": "MY_TAG2", "Values": ["MY_SHARED_VALUE"]},
]
).get("ResourceTagMappingList", [])
results.should.have.length_of(1)
instance_1_id.should.be.within(results[0]["ResourceARN"])
instance_2_id.shouldnt.be.within(results[0]["ResourceARN"])
@mock_rds2
@mock_resourcegroupstaggingapi
def test_get_resources_rds():
client = boto3.client("rds", region_name="us-west-2")
resources_tagged = []
resources_untagged = []
for i in range(3):
database = client.create_db_instance(
DBInstanceIdentifier="db-instance-{}".format(i),
Engine="postgres",
DBInstanceClass="db.m1.small",
CopyTagsToSnapshot=True if i else False,
Tags=[{"Key": "test", "Value": "value-{}".format(i)}] if i else [],
).get("DBInstance")
snapshot = client.create_db_snapshot(
DBInstanceIdentifier=database["DBInstanceIdentifier"],
DBSnapshotIdentifier="snapshot-{}".format(i),
).get("DBSnapshot")
group = resources_tagged if i else resources_untagged
group.append(database["DBInstanceArn"])
group.append(snapshot["DBSnapshotArn"])
def assert_response(response, expected_count, resource_type=None):
results = response.get("ResourceTagMappingList", [])
results.should.have.length_of(expected_count)
for item in results:
arn = item["ResourceARN"]
arn.should.be.within(resources_tagged)
arn.should_not.be.within(resources_untagged)
if resource_type:
sure.this(":{}:".format(resource_type)).should.be.within(arn)
rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-west-2")
resp = rtapi.get_resources(ResourceTypeFilters=["rds"])
assert_response(resp, 4)
resp = rtapi.get_resources(ResourceTypeFilters=["rds:db"])
assert_response(resp, 2, resource_type="db")
resp = rtapi.get_resources(ResourceTypeFilters=["rds:snapshot"])
assert_response(resp, 2, resource_type="snapshot")
resp = rtapi.get_resources(TagFilters=[{"Key": "test", "Values": ["value-1"]}])
assert_response(resp, 2)
|
|
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import md5
from reportlab.lib.enums import TA_RIGHT
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.utils import flatten, open_for_read, getStringIO, \
LazyImageReader, haveImages
from reportlab.platypus.doctemplate import BaseDocTemplate, PageTemplate, IndexingFlowable
from reportlab.platypus.flowables import Flowable, CondPageBreak, \
KeepInFrame, ParagraphAndImage
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.tables import Table, TableStyle
from xhtml2pdf.reportlab_paragraph import Paragraph
from xhtml2pdf.util import getUID, getBorderStyle
import io
import cgi
import copy
import logging
import reportlab.pdfbase.pdfform as pdfform
import sys
try:
import PIL.Image as PILImage
except:
try:
import Image as PILImage
except:
PILImage = None
log = logging.getLogger("xhtml2pdf")
MAX_IMAGE_RATIO = 0.95
class PTCycle(list):
def __init__(self):
self._restart = 0
self._idx = 0
list.__init__(self)
def cyclicIterator(self):
while 1:
yield self[self._idx]
self._idx += 1
if self._idx >= len(self):
self._idx = self._restart
class PmlMaxHeightMixIn:
def setMaxHeight(self, availHeight):
self.availHeightValue = availHeight
if availHeight < 70000:
if hasattr(self, "canv"):
if not hasattr(self.canv, "maxAvailHeightValue"):
self.canv.maxAvailHeightValue = 0
self.availHeightValue = self.canv.maxAvailHeightValue = max(
availHeight,
self.canv.maxAvailHeightValue)
else:
self.availHeightValue = availHeight
if not hasattr(self, "availHeightValue"):
self.availHeightValue = 0
return self.availHeightValue
def getMaxHeight(self):
if not hasattr(self, "availHeightValue"):
return 0
return self.availHeightValue
class PmlBaseDoc(BaseDocTemplate):
"""
We use our own document template to get access to the canvas
    and set some information once.
"""
def beforePage(self):
        # Tricky way to set the producer, because there is no real privacy in Python
info = "pisa HTML to PDF <http://www.htmltopdf.org>"
self.canv._doc.info.producer = info
'''
# Convert to ASCII because there is a Bug in Reportlab not
# supporting other than ASCII. Send to list on 23.1.2007
author = toString(self.pml_data.get("author", "")).encode("ascii","ignore")
subject = toString(self.pml_data.get("subject", "")).encode("ascii","ignore")
title = toString(self.pml_data.get("title", "")).encode("ascii","ignore")
# print repr((author,title,subject))
self.canv.setAuthor(author)
self.canv.setSubject(subject)
self.canv.setTitle(title)
if self.pml_data.get("fullscreen", 0):
self.canv.showFullScreen0()
if self.pml_data.get("showoutline", 0):
self.canv.showOutline()
if self.pml_data.get("duration", None) is not None:
self.canv.setPageDuration(self.pml_data["duration"])
'''
def afterFlowable(self, flowable):
# Does the flowable contain fragments?
if getattr(flowable, "outline", False):
self.notify('TOCEntry', (
flowable.outlineLevel,
cgi.escape(copy.deepcopy(flowable.text), 1),
self.page))
def handle_nextPageTemplate(self, pt):
        '''
        On endPage change to the page template with name or index pt.
        If pt also has templates for even and odd pages, convert it to a list.
        '''
has_left_template = self._has_template_for_name(pt + '_left')
has_right_template = self._has_template_for_name(pt + '_right')
if has_left_template and has_right_template:
pt = [pt + '_left', pt + '_right']
if isinstance(pt, str):
if hasattr(self, '_nextPageTemplateCycle'):
del self._nextPageTemplateCycle
for t in self.pageTemplates:
if t.id == pt:
self._nextPageTemplateIndex = self.pageTemplates.index(t)
return
raise ValueError("can't find template('%s')" % pt)
elif isinstance(pt, int):
if hasattr(self, '_nextPageTemplateCycle'):
del self._nextPageTemplateCycle
self._nextPageTemplateIndex = pt
elif isinstance(pt, list) or isinstance(pt, tuple):
#used for alternating left/right pages
#collect the refs to the template objects, complain if any are bad
c = PTCycle()
for ptn in pt:
#special case name used to short circuit the iteration
if ptn == '*':
c._restart = len(c)
continue
for t in self.pageTemplates:
if t.id == ptn.strip():
c.append(t)
break
if not c:
raise ValueError("No valid page templates in cycle")
elif c._restart > len(c):
raise ValueError("Invalid cycle restart position")
            #ensure we start on the first one
self._nextPageTemplateCycle = c.cyclicIterator()
else:
raise TypeError("Argument pt should be string or integer or list")
def _has_template_for_name(self, name):
for template in self.pageTemplates:
if template.id == name.strip():
return True
return False
class PmlPageTemplate(PageTemplate):
PORTRAIT = 'portrait'
LANDSCAPE = 'landscape'
# by default portrait
pageorientation = PORTRAIT
def __init__(self, **kw):
self.pisaStaticList = []
self.pisaBackgroundList = []
self.pisaBackground = None
PageTemplate.__init__(self, **kw)
self._page_count = 0
self._first_flow = True
def isFirstFlow(self, canvas):
if self._first_flow:
if canvas.getPageNumber() <= self._page_count:
self._first_flow = False
else:
self._page_count = canvas.getPageNumber()
canvas._doctemplate._page_count = canvas.getPageNumber()
return self._first_flow
def isPortrait(self):
return self.pageorientation == self.PORTRAIT
def isLandscape(self):
return self.pageorientation == self.LANDSCAPE
def beforeDrawPage(self, canvas, doc):
canvas.saveState()
try:
# Background
pisaBackground = None
if (self.isFirstFlow(canvas)
and hasattr(self, "pisaBackground")
and self.pisaBackground
and (not self.pisaBackground.notFound())):
# Is image not PDF
if self.pisaBackground.mimetype.startswith("image/"):
try:
                        img = PmlImageReader(io.BytesIO(self.pisaBackground.getData()))
iw, ih = img.getSize()
pw, ph = canvas._pagesize
width = pw # min(iw, pw) # max
wfactor = float(width) / iw
height = ph # min(ih, ph) # max
hfactor = float(height) / ih
factor_min = min(wfactor, hfactor)
if self.isPortrait():
w = iw * factor_min
h = ih * factor_min
canvas.drawImage(img, 0, ph - h, w, h)
elif self.isLandscape():
factor_max = max(wfactor, hfactor)
h = ih * factor_max
w = iw * factor_min
canvas.drawImage(img, 0, 0, w, h)
except:
log.exception("Draw background")
# PDF!
else:
pisaBackground = self.pisaBackground
if pisaBackground:
self.pisaBackgroundList.append(pisaBackground)
def pageNumbering(objList):
for obj in flatten(objList):
if isinstance(obj, PmlParagraph):
for frag in obj.frags:
if frag.pageNumber:
frag.text = str(pagenumber)
elif frag.pageCount:
frag.text = str(canvas._doctemplate._page_count)
elif isinstance(obj, PmlTable):
# Flatten the cells ([[1,2], [3,4]] becomes [1,2,3,4])
flat_cells = [item for sublist in obj._cellvalues for item in sublist]
pageNumbering(flat_cells)
try:
# Paint static frames
pagenumber = canvas.getPageNumber()
for frame in self.pisaStaticList:
frame = copy.deepcopy(frame)
story = frame.pisaStaticStory
pageNumbering(story)
frame.addFromList(story, canvas)
except Exception: # TODO: Kill this!
log.debug("PmlPageTemplate", exc_info=1)
finally:
canvas.restoreState()
_ctr = 1
class PmlImageReader(object): # TODO We need a factory here, returning either a class for java or a class for PIL
"""
Wraps up either PIL or Java to get data from bitmaps
"""
_cache = {}
def __init__(self, fileName):
if isinstance(fileName, PmlImageReader):
self.__dict__ = fileName.__dict__ # borgize
return
        #start with lots of null private fields, to be populated by
#the relevant engine.
self.fileName = fileName
self._image = None
self._width = None
self._height = None
self._transparent = None
self._data = None
imageReaderFlags = 0
if PILImage and isinstance(fileName, PILImage.Image):
self._image = fileName
self.fp = getattr(fileName, 'fp', None)
try:
self.fileName = self._image.fileName
except AttributeError:
self.fileName = 'PILIMAGE_%d' % id(self)
else:
try:
self.fp = open_for_read(fileName, 'b')
if isinstance(self.fp, io.StringIO().__class__):
imageReaderFlags = 0 # avoid messing with already internal files
if imageReaderFlags > 0: # interning
data = self.fp.read()
if imageReaderFlags & 2: # autoclose
try:
self.fp.close()
except:
pass
if imageReaderFlags & 4: # cache the data
if not self._cache:
from rl_config import register_reset
register_reset(self._cache.clear)
data = self._cache.setdefault(md5(data).digest(), data)
self.fp = getStringIO(data)
elif imageReaderFlags == - 1 and isinstance(fileName, (str, unicode)):
#try Ralf Schmitt's re-opening technique of avoiding too many open files
self.fp.close()
del self.fp # will become a property in the next statement
self.__class__ = LazyImageReader
if haveImages:
#detect which library we are using and open the image
if not self._image:
self._image = self._read_image(self.fp)
if getattr(self._image, 'format', None) == 'JPEG':
self.jpeg_fh = self._jpeg_fh
else:
from reportlab.pdfbase.pdfutils import readJPEGInfo
try:
self._width, self._height, c = readJPEGInfo(self.fp)
except:
                            raise RuntimeError('Imaging Library not available; cannot read bitmaps other than JPEGs')
self.jpeg_fh = self._jpeg_fh
self._data = self.fp.read()
self._dataA = None
self.fp.seek(0)
except: # TODO: Kill the catch-all
et, ev, tb = sys.exc_info()
if hasattr(ev, 'args'):
a = str(ev.args[- 1]) + (' fileName=%r' % fileName)
ev.args = ev.args[: - 1] + (a,)
raise et.with_traceback(tb)
else:
raise
def _read_image(self, fp):
if sys.platform[0:4] == 'java':
from javax.imageio import ImageIO
from java.io import ByteArrayInputStream
input_stream = ByteArrayInputStream(fp.read())
return ImageIO.read(input_stream)
elif PILImage:
return PILImage.open(fp)
def _jpeg_fh(self):
fp = self.fp
fp.seek(0)
return fp
def jpeg_fh(self):
return None
def getSize(self):
if self._width is None or self._height is None:
if sys.platform[0:4] == 'java':
self._width = self._image.getWidth()
self._height = self._image.getHeight()
else:
self._width, self._height = self._image.size
return self._width, self._height
def getRGBData(self):
"Return byte array of RGB data as string"
if self._data is None:
self._dataA = None
if sys.platform[0:4] == 'java':
import jarray # TODO: Move to top.
from java.awt.image import PixelGrabber
width, height = self.getSize()
buffer = jarray.zeros(width * height, 'i')
pg = PixelGrabber(self._image, 0, 0, width, height, buffer, 0, width)
pg.grabPixels()
# there must be a way to do this with a cast not a byte-level loop,
# I just haven't found it yet...
pixels = []
a = pixels.append
for rgb in buffer:
a(chr((rgb >> 16) & 0xff))
a(chr((rgb >> 8) & 0xff))
a(chr(rgb & 0xff))
self._data = ''.join(pixels)
self.mode = 'RGB'
else:
im = self._image
mode = self.mode = im.mode
if mode == 'RGBA':
im.load()
self._dataA = PmlImageReader(im.split()[3])
im = im.convert('RGB')
self.mode = 'RGB'
elif mode not in ('L', 'RGB', 'CMYK'):
im = im.convert('RGB')
self.mode = 'RGB'
if hasattr(im, 'tobytes'):
self._data = im.tobytes()
else:
# PIL compatibility
self._data = im.tostring()
return self._data
def getImageData(self):
width, height = self.getSize()
return width, height, self.getRGBData()
def getTransparent(self):
if sys.platform[0:4] == 'java':
return None
elif "transparency" in self._image.info:
transparency = self._image.info["transparency"] * 3
palette = self._image.palette
if hasattr(palette, 'palette'):
palette = palette.palette
elif hasattr(palette, 'data'):
palette = palette.data
else:
return None
# 8-bit PNGs could give an empty string as transparency value, so
# we have to be careful here.
try:
return map(ord, palette[transparency:transparency + 3])
except:
return None
else:
return None
def __str__(self):
try:
fn = self.fileName.read()
if not fn:
fn = id(self)
return "PmlImageObject_%s" % hash(fn)
except:
fn = self.fileName
if not fn:
fn = id(self)
return fn
class PmlImage(Flowable, PmlMaxHeightMixIn):
def __init__(self, data, width=None, height=None, mask="auto", mimetype=None, **kw):
self.kw = kw
self.hAlign = 'CENTER'
self._mask = mask
self._imgdata = data
# print "###", repr(data)
self.mimetype = mimetype
img = self.getImage()
if img:
self.imageWidth, self.imageHeight = img.getSize()
self.drawWidth = width or self.imageWidth
self.drawHeight = height or self.imageHeight
def wrap(self, availWidth, availHeight):
" This can be called more than once! Do not overwrite important data like drawWidth "
availHeight = self.setMaxHeight(availHeight)
# print "image wrap", id(self), availWidth, availHeight, self.drawWidth, self.drawHeight
width = min(self.drawWidth, availWidth)
wfactor = float(width) / self.drawWidth
height = min(self.drawHeight, availHeight * MAX_IMAGE_RATIO)
hfactor = float(height) / self.drawHeight
factor = min(wfactor, hfactor)
self.dWidth = self.drawWidth * factor
self.dHeight = self.drawHeight * factor
# print "imgage result", factor, self.dWidth, self.dHeight
return self.dWidth, self.dHeight
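    # A worked example of the scaling above (assumed numbers): with
    # drawWidth=200, drawHeight=100 and availWidth=100, availHeight=80,
    # wfactor = 100/200 = 0.5 and hfactor = (80*0.95)/100 = 0.76, so
    # factor = 0.5 and the image is wrapped to dWidth=100, dHeight=50.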
def getImage(self):
        img = PmlImageReader(io.BytesIO(self._imgdata))
return img
def draw(self):
img = self.getImage()
self.canv.drawImage(
img,
0, 0,
self.dWidth,
self.dHeight,
mask=self._mask)
def identity(self, maxLen=None):
r = Flowable.identity(self, maxLen)
return r
class PmlParagraphAndImage(ParagraphAndImage, PmlMaxHeightMixIn):
def wrap(self, availWidth, availHeight):
self.I.canv = self.canv
result = ParagraphAndImage.wrap(self, availWidth, availHeight)
del self.I.canv
return result
def split(self, availWidth, availHeight):
# print "# split", id(self)
if not hasattr(self, "wI"):
self.wI, self.hI = self.I.wrap(availWidth, availHeight) # drawWidth, self.I.drawHeight
return ParagraphAndImage.split(self, availWidth, availHeight)
class PmlParagraph(Paragraph, PmlMaxHeightMixIn):
def _calcImageMaxSizes(self, availWidth, availHeight):
self.hasImages = False
for frag in self.frags:
if hasattr(frag, "cbDefn") and frag.cbDefn.kind == "img":
img = frag.cbDefn
if img.width > 0 and img.height > 0:
self.hasImages = True
width = min(img.width, availWidth)
wfactor = float(width) / img.width
                    height = min(img.height, availHeight * MAX_IMAGE_RATIO)  # XXX less than 100% because 100% does not work...
hfactor = float(height) / img.height
factor = min(wfactor, hfactor)
img.height *= factor
img.width *= factor
def wrap(self, availWidth, availHeight):
availHeight = self.setMaxHeight(availHeight)
style = self.style
self.deltaWidth = style.paddingLeft + style.paddingRight + style.borderLeftWidth + style.borderRightWidth
self.deltaHeight = style.paddingTop + style.paddingBottom + style.borderTopWidth + style.borderBottomWidth
# reduce the available width & height by the padding so the wrapping
# will use the correct size
availWidth -= self.deltaWidth
availHeight -= self.deltaHeight
        # Modify maximum image sizes
self._calcImageMaxSizes(availWidth, availHeight)
# call the base class to do wrapping and calculate the size
Paragraph.wrap(self, availWidth, availHeight)
#self.height = max(1, self.height)
#self.width = max(1, self.width)
# increase the calculated size by the padding
self.width = self.width + self.deltaWidth
self.height = self.height + self.deltaHeight
return self.width, self.height
def split(self, availWidth, availHeight):
if len(self.frags) <= 0:
return []
#the split information is all inside self.blPara
if not hasattr(self, 'deltaWidth'):
self.wrap(availWidth, availHeight)
availWidth -= self.deltaWidth
availHeight -= self.deltaHeight
return Paragraph.split(self, availWidth, availHeight)
def draw(self):
# Create outline
if getattr(self, "outline", False):
# Check level and add all levels
last = getattr(self.canv, "outlineLast", - 1) + 1
while last < self.outlineLevel:
# print "(OUTLINE", last, self.text
key = getUID()
self.canv.bookmarkPage(key)
self.canv.addOutlineEntry(
self.text,
key,
last,
not self.outlineOpen)
last += 1
self.canv.outlineLast = self.outlineLevel
key = getUID()
self.canv.bookmarkPage(key)
self.canv.addOutlineEntry(
self.text,
key,
self.outlineLevel,
not self.outlineOpen)
last += 1
# Draw the background and borders here before passing control on to
# ReportLab. This is because ReportLab can't handle the individual
# components of the border independently. This will also let us
# support more border styles eventually.
canvas = self.canv
style = self.style
bg = style.backColor
leftIndent = style.leftIndent
bp = 0 # style.borderPadding
x = leftIndent - bp
y = - bp
w = self.width - (leftIndent + style.rightIndent) + 2 * bp
h = self.height + 2 * bp
if bg:
# draw a filled rectangle (with no stroke) using bg color
canvas.saveState()
canvas.setFillColor(bg)
canvas.rect(x, y, w, h, fill=1, stroke=0)
canvas.restoreState()
# we need to hide the bg color (if any) so Paragraph won't try to draw it again
style.backColor = None
# offset the origin to compensate for the padding
canvas.saveState()
canvas.translate(
(style.paddingLeft + style.borderLeftWidth),
-1 * (style.paddingTop + style.borderTopWidth)) # + (style.leading / 4)))
# Call the base class draw method to finish up
Paragraph.draw(self)
canvas.restoreState()
# Reset color because we need it again if we run 2-PASS like we
# do when using TOC
style.backColor = bg
canvas.saveState()
def _drawBorderLine(bstyle, width, color, x1, y1, x2, y2):
# We need width and border style to be able to draw a border
if width and getBorderStyle(bstyle):
# If no color for border is given, the text color is used (like defined by W3C)
if color is None:
color = style.textColor
# print "Border", bstyle, width, color
if color is not None:
canvas.setStrokeColor(color)
canvas.setLineWidth(width)
canvas.line(x1, y1, x2, y2)
_drawBorderLine(style.borderLeftStyle,
style.borderLeftWidth,
style.borderLeftColor,
x, y, x, y + h)
_drawBorderLine(style.borderRightStyle,
style.borderRightWidth,
style.borderRightColor,
x + w, y, x + w, y + h)
_drawBorderLine(style.borderTopStyle,
style.borderTopWidth,
style.borderTopColor,
x, y + h, x + w, y + h)
_drawBorderLine(style.borderBottomStyle,
style.borderBottomWidth,
style.borderBottomColor,
x, y, x + w, y)
canvas.restoreState()
class PmlKeepInFrame(KeepInFrame, PmlMaxHeightMixIn):
def wrap(self, availWidth, availHeight):
availWidth = max(availWidth, 1.0)
availHeight = max(availHeight, 1.0)
self.maxWidth = availWidth
self.maxHeight = self.setMaxHeight(availHeight)
return KeepInFrame.wrap(self, availWidth, availHeight)
class PmlTable(Table, PmlMaxHeightMixIn):
def _normWidth(self, w, maxw):
"""
Helper for calculating percentages
"""
        if isinstance(w, str):
w = ((maxw / 100.0) * float(w[: - 1]))
elif (w is None) or (w == "*"):
w = maxw
return min(w, maxw)
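    # A few worked examples of _normWidth (assumed inputs): with maxw=400,
    # "50%" -> 200.0, None or "*" -> 400, and 600 -> 400 (clamped to maxw).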
def _listCellGeom(self, V, w, s, W=None, H=None, aH=72000):
# print "#", self.availHeightValue
if aH == 72000:
aH = self.getMaxHeight() or aH
return Table._listCellGeom(self, V, w, s, W=W, H=H, aH=aH)
def wrap(self, availWidth, availHeight):
self.setMaxHeight(availHeight)
        # Strange bug: sometimes totalWidth is not set!?
        try:
            self.totalWidth
        except AttributeError:
            self.totalWidth = availWidth
# Prepare values
totalWidth = self._normWidth(self.totalWidth, availWidth)
remainingWidth = totalWidth
remainingCols = 0
newColWidths = self._colWidths
        # Calculate widths that are fixed
        # IMPORTANT!!! We cannot substitute the private value
        # self._colWidths, therefore we have to modify the list in place
for i, colWidth in enumerate(newColWidths):
if (colWidth is not None) or (colWidth == '*'):
colWidth = self._normWidth(colWidth, totalWidth)
remainingWidth -= colWidth
else:
remainingCols += 1
colWidth = None
newColWidths[i] = colWidth
# Distribute remaining space
minCellWidth = totalWidth * 0.01
if remainingCols > 0:
for i, colWidth in enumerate(newColWidths):
if colWidth is None:
newColWidths[i] = max(minCellWidth, remainingWidth / remainingCols) # - 0.1
        # Bigger than totalWidth? Let's reduce the fixed entries proportionally
if sum(newColWidths) > totalWidth:
quotient = totalWidth / sum(newColWidths)
for i in range(len(newColWidths)):
newColWidths[i] = newColWidths[i] * quotient
# To avoid rounding errors adjust one col with the difference
diff = sum(newColWidths) - totalWidth
if diff > 0:
newColWidths[0] -= diff
return Table.wrap(self, availWidth, availHeight)
class PmlPageCount(IndexingFlowable):
def __init__(self):
IndexingFlowable.__init__(self)
self.second_round = False
def isSatisfied(self):
s = self.second_round
self.second_round = True
return s
def drawOn(self, canvas, x, y, _sW=0):
pass
class PmlTableOfContents(TableOfContents):
def wrap(self, availWidth, availHeight):
"""
All table properties should be known by now.
"""
widths = (availWidth - self.rightColumnWidth,
self.rightColumnWidth)
# makes an internal table which does all the work.
# we draw the LAST RUN's entries! If there are
# none, we make some dummy data to keep the table
# from complaining
if len(self._lastEntries) == 0:
_tempEntries = [(0, 'Placeholder for table of contents', 0)]
else:
_tempEntries = self._lastEntries
lastMargin = 0
tableData = []
tableStyle = [
('VALIGN', (0, 0), (- 1, - 1), 'TOP'),
('LEFTPADDING', (0, 0), (- 1, - 1), 0),
('RIGHTPADDING', (0, 0), (- 1, - 1), 0),
('TOPPADDING', (0, 0), (- 1, - 1), 0),
('BOTTOMPADDING', (0, 0), (- 1, - 1), 0),
]
for i, entry in enumerate(_tempEntries):
level, text, pageNum = entry[:3]
leftColStyle = self.levelStyles[level]
if i: # Not for first element
tableStyle.append((
'TOPPADDING',
(0, i), (- 1, i),
max(lastMargin, leftColStyle.spaceBefore)))
# print leftColStyle.leftIndent
lastMargin = leftColStyle.spaceAfter
#right col style is right aligned
rightColStyle = ParagraphStyle(name='leftColLevel%d' % level,
parent=leftColStyle,
leftIndent=0,
alignment=TA_RIGHT)
leftPara = Paragraph(text, leftColStyle)
rightPara = Paragraph(str(pageNum), rightColStyle)
tableData.append([leftPara, rightPara])
self._table = Table(
tableData,
colWidths=widths,
style=TableStyle(tableStyle))
self.width, self.height = self._table.wrapOn(self.canv, availWidth, availHeight)
return self.width, self.height
class PmlRightPageBreak(CondPageBreak):
def __init__(self):
pass
def wrap(self, availWidth, availHeight):
if not self.canv.getPageNumber() % 2:
self.width = availWidth
self.height = availHeight
return availWidth, availHeight
self.width = self.height = 0
return 0, 0
class PmlLeftPageBreak(CondPageBreak):
def __init__(self):
pass
def wrap(self, availWidth, availHeight):
if self.canv.getPageNumber() % 2:
self.width = availWidth
self.height = availHeight
return availWidth, availHeight
self.width = self.height = 0
return 0, 0
# --- Pdf Form
class PmlInput(Flowable):
    def __init__(self, name, type="text", width=10, height=10, default="", options=None):
self.width = width
self.height = height
self.type = type
self.name = name
self.default = default
        self.options = options if options is not None else []
def wrap(self, *args):
return self.width, self.height
def draw(self):
c = self.canv
c.saveState()
c.setFont("Helvetica", 10)
if self.type == "text":
pdfform.textFieldRelative(c, self.name, 0, 0, self.width, self.height)
c.rect(0, 0, self.width, self.height)
elif self.type == "radio":
c.rect(0, 0, self.width, self.height)
elif self.type == "checkbox":
if self.default:
pdfform.buttonFieldRelative(c, self.name, "Yes", 0, 0)
else:
pdfform.buttonFieldRelative(c, self.name, "Off", 0, 0)
c.rect(0, 0, self.width, self.height)
elif self.type == "select":
pdfform.selectFieldRelative(c, self.name, self.default, self.options, 0, 0, self.width, self.height)
c.rect(0, 0, self.width, self.height)
c.restoreState()
|
|
"""Treadmill initialization and server presence daemon.
This service registers the node into the Treadmill cell and, as such, is
responsible for publishing the node's capacity to the scheduler.
This service is also responsible for shutting down the node, when necessary or
requested, by disabling all traffic from and to the containers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import logging
import os
import time
import kazoo
import click
from treadmill import appenv
from treadmill import context
from treadmill import netdev
from treadmill import postmortem
from treadmill import supervisor
from treadmill import sysinfo
from treadmill import traits
from treadmill import utils
from treadmill import version
from treadmill import zknamespace as z
from treadmill import zkutils
if os.name == 'posix':
from treadmill import iptables
_LOGGER = logging.getLogger(__name__)
_WATCHDOG_CHECK_INTERVAL = 30
def init():
"""Top level command handler."""
@click.command()
@click.option('--exit-on-fail', is_flag=True, default=False)
@click.option('--zkid', help='Zookeeper session ID file.')
@click.option('--notification-fd', help='Notification file descriptor.',
type=int)
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
@click.option('--runtime', envvar='TREADMILL_RUNTIME', required=True)
@click.pass_context
def top(ctx, exit_on_fail, zkid, notification_fd, approot, runtime):
"""Run treadmill init process."""
_LOGGER.info('Initializing Treadmill: %s (%s)', approot, runtime)
tm_env = appenv.AppEnvironment(approot)
stop_on_lost = functools.partial(_stop_on_lost, tm_env)
zkclient = zkutils.connect(
context.GLOBAL.zk.url,
idpath=zkid,
listener=stop_on_lost,
session_timeout=context.GLOBAL.zk.session_timeout
)
while not zkclient.exists(z.SERVER_PRESENCE):
_LOGGER.warning('namespace not ready.')
time.sleep(30)
hostname = sysinfo.hostname()
zk_blackout_path = z.path.blackedout_server(hostname)
zk_server_path = z.path.server(hostname)
zk_presence_path = z.path.server_presence(hostname)
while not zkclient.exists(zk_server_path):
_LOGGER.warning('server %s not defined in the cell.', hostname)
time.sleep(30)
_LOGGER.info('Checking blackout list.')
blacklisted = bool(zkclient.exists(zk_blackout_path))
root_cgroup = ctx.obj['ROOT_CGROUP']
os_args = {}
if os.name == 'posix':
os_args['cgroup_prefix'] = root_cgroup
if not blacklisted:
# Node startup.
_node_start(tm_env, runtime, zkclient, hostname, zk_server_path,
zk_presence_path, os_args)
utils.report_ready(notification_fd)
_init_network()
_start_init1(tm_env)
_LOGGER.info('Ready.')
down_reason = _main_loop(tm_env, zkclient, zk_presence_path)
if down_reason is not None:
_LOGGER.warning('Shutting down: %s', down_reason)
# Blackout the server.
zkutils.ensure_exists(
zkclient,
zk_blackout_path,
acl=[zkclient.make_host_acl(hostname, 'rwcda')],
data=down_reason
)
trigger_postmortem = True
else:
# Blacked out manually
trigger_postmortem = bool(zkclient.exists(zk_blackout_path))
if trigger_postmortem:
postmortem.run(approot, root_cgroup)
else:
# Node was already blacked out.
_LOGGER.warning('Shutting down blacked out node.')
# This is the shutdown phase.
# Delete the node
if zk_presence_path:
zkutils.ensure_deleted(zkclient, zk_presence_path)
zkclient.remove_listener(stop_on_lost)
zkclient.stop()
zkclient.close()
_cleanup_network()
        # to terminate all the running apps
_blackout_terminate(tm_env)
if exit_on_fail:
utils.sys_exit(-1)
else:
# Sit forever in a broken state
while True:
time.sleep(1000000)
return top
def _blackout_terminate(tm_env):
"""Blackout by terminating all containers in running dir.
"""
_LOGGER.info('Terminating monitor.')
supervisor.control_service(
os.path.join(tm_env.init_dir, 'monitor'),
supervisor.ServiceControlAction.down,
wait=supervisor.ServiceWaitAction.down
)
_LOGGER.info('Terminating init1.')
supervisor.control_service(
os.path.join(tm_env.init_dir, 'start_init1'),
supervisor.ServiceControlAction.down,
wait=supervisor.ServiceWaitAction.down
)
def _start_init1(tm_env):
"""Start init1 supervision."""
_LOGGER.info('Starting init1.')
supervisor.control_service(
os.path.join(tm_env.init_dir, 'start_init1'),
supervisor.ServiceControlAction.up,
wait=supervisor.ServiceWaitAction.up
)
def _init_network():
"""Initialize network.
"""
if os.name == 'nt':
return
# (Re)Enable IP forwarding
netdev.dev_conf_forwarding_set('tm0', True)
def _cleanup_network():
"""Cleanup network.
"""
if os.name == 'nt':
return
# Disable network traffic from and to the containers.
netdev.dev_conf_forwarding_set('tm0', False)
def _node_start(tm_env, runtime, zkclient, hostname,
zk_server_path, zk_presence_path, os_args):
"""Node startup. Try to re-establish old session or start fresh.
"""
old_session_ok = False
try:
_data, metadata = zkclient.get(zk_presence_path)
if metadata.owner_session_id == zkclient.client_id[0]:
_LOGGER.info('Reconnecting with previous session: %s',
metadata.owner_session_id)
old_session_ok = True
else:
_LOGGER.info('Session id does not match, new session.')
zkclient.delete(zk_presence_path)
except kazoo.client.NoNodeError:
_LOGGER.info('%s does not exist.', zk_presence_path)
if not old_session_ok:
_node_initialize(tm_env, runtime,
zkclient, hostname,
zk_server_path, zk_presence_path, os_args)
def _node_initialize(tm_env, runtime, zkclient, hostname,
zk_server_path, zk_presence_path, os_args):
"""Node initialization. Should only be done on a cold start.
"""
try:
new_node_info = sysinfo.node_info(tm_env, runtime, **os_args)
new_node_info['traits'] = traits.detect()
# Merging scheduler data with node_info data
node_info = zkutils.get(zkclient, zk_server_path)
node_info.update(new_node_info)
_LOGGER.info('Registering node: %s: %s, %r',
zk_server_path, hostname, node_info)
zkutils.update(zkclient, zk_server_path, node_info)
host_acl = zkclient.make_host_acl(hostname, 'rwcda')
_LOGGER.debug('host_acl: %r', host_acl)
zkutils.put(zkclient,
zk_presence_path, {'seen': False},
acl=[host_acl],
ephemeral=True)
# TODO: Fix the network initialization. Then the below can be part of
# appenv.initialize()
if os.name == 'posix':
# Flush all rules in iptables nat and mangle tables (it is assumed
# that none but Treadmill manages these tables) and bulk load all
# the Treadmill static rules
iptables.initialize(node_info['network']['external_ip'])
node_version = version.get_version()
if zkclient.exists(z.VERSION) and zkclient.exists(z.VERSION_HISTORY):
_LOGGER.info('Registering node version: %r', node_version)
version.save_version(zkclient, hostname, node_version)
else:
_LOGGER.warning(
'Unable to register node version, namespace not ready: %r',
node_version
)
except Exception: # pylint: disable=W0703
_LOGGER.exception('Node initialization failed')
zkclient.stop()
def _stop_on_lost(tm_env, state):
_LOGGER.debug('ZK connection state: %s', state)
if state == zkutils.states.KazooState.LOST:
_LOGGER.info('ZK connection lost, stopping node.')
_LOGGER.info('Terminating svscan in %s', tm_env.init_dir)
supervisor.control_svscan(
tm_env.init_dir,
supervisor.SvscanControlAction.quit
)
# server_init should be terminated at this point but exit just in case.
utils.sys_exit(-1)
def _main_loop(tm_env, zkclient, zk_presence_path):
"""Main loop.
Wait for zk event and check watchdogs.
"""
down_reason = None
# Now that the server is registered, setup the stop-on-delete
# trigger and the deadman's trigger.
node_deleted_event = zkclient.handler.event_object()
node_deleted_event.clear()
@zkclient.DataWatch(zk_presence_path)
@utils.exit_on_unhandled
def _exit_on_delete(data, _stat, event):
"""Force exit if server node is deleted."""
if (data is None or
(event is not None and event.type == 'DELETED')):
# The node is deleted
node_deleted_event.set()
return False
else:
# Reestablish the watch.
return True
while not node_deleted_event.wait(_WATCHDOG_CHECK_INTERVAL):
# NOTE: The loop time above is tailored to the kernel watchdog time.
# Be very careful before changing it.
# Check our watchdogs
result = tm_env.watchdogs.check()
if result:
# Something is wrong with the node, shut it down
down_reason = 'watchdogs %r failed.' % result
break
return down_reason
|
|
# -*- coding: utf-8 -*-
"""
wakatime.base
~~~~~~~~~~~~~
wakatime module entry point.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import base64
import logging
import os
import platform
import re
import sys
import time
import traceback
import socket
try:
import ConfigParser as configparser
except ImportError:
import configparser
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages'))
from .__about__ import __version__
from .compat import u, open, is_py3
from .logger import setup_logging
from .offlinequeue import Queue
from .packages import argparse
from .packages import simplejson as json
from .packages.requests.exceptions import RequestException
from .project import get_project_info
from .session_cache import SessionCache
from .stats import get_file_stats
try:
from .packages import tzlocal
except:
from .packages import tzlocal3 as tzlocal
log = logging.getLogger('WakaTime')
class FileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
values = os.path.realpath(values)
setattr(namespace, self.dest, values)
def upgradeConfigFile(configFile):
"""For backwards-compatibility, upgrade the existing config file
to work with configparser and rename from .wakatime.conf to .wakatime.cfg.
"""
if os.path.isfile(configFile):
# if upgraded cfg file already exists, don't overwrite it
return
oldConfig = os.path.join(os.path.expanduser('~'), '.wakatime.conf')
try:
configs = {
'ignore': [],
}
with open(oldConfig, 'r', encoding='utf-8') as fh:
for line in fh.readlines():
line = line.split('=', 1)
if len(line) == 2 and line[0].strip() and line[1].strip():
if line[0].strip() == 'ignore':
configs['ignore'].append(line[1].strip())
else:
configs[line[0].strip()] = line[1].strip()
with open(configFile, 'w', encoding='utf-8') as fh:
fh.write("[settings]\n")
for name, value in configs.items():
if isinstance(value, list):
fh.write("%s=\n" % name)
for item in value:
fh.write(" %s\n" % item)
else:
fh.write("%s = %s\n" % (name, value))
os.remove(oldConfig)
except IOError:
pass
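# A before/after sketch of what upgradeConfigFile writes (key names and values
# are only illustrative; the file locations are the defaults used in
# parseConfigFile below):
#
#     ~/.wakatime.conf (old)        ~/.wakatime.cfg (new)
#     api_key=abc123                [settings]
#     ignore=^/tmp/                 api_key = abc123
#     ignore=^/var/                 ignore=
#                                    ^/tmp/
#                                    ^/var/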
def parseConfigFile(configFile=None):
"""Returns a configparser.SafeConfigParser instance with configs
read from the config file. Default location of the config file is
at ~/.wakatime.cfg.
"""
if not configFile:
configFile = os.path.join(os.path.expanduser('~'), '.wakatime.cfg')
upgradeConfigFile(configFile)
configs = configparser.SafeConfigParser()
try:
with open(configFile, 'r', encoding='utf-8') as fh:
try:
configs.readfp(fh)
except configparser.Error:
print(traceback.format_exc())
return None
except IOError:
print(u('Error: Could not read from config file {0}').format(u(configFile)))
return configs
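# A minimal sketch, assuming a ~/.wakatime.cfg containing a [settings] section,
# of how the parser above can be queried on its own; the helper name is
# illustrative and not part of the original module.
def _example_read_api_key(configFile=None):
    configs = parseConfigFile(configFile)
    if configs and configs.has_option('settings', 'api_key'):
        return configs.get('settings', 'api_key')
    return None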
def parseArguments(argv):
"""Parse command line arguments and configs from ~/.wakatime.cfg.
Command line arguments take precedence over config file settings.
Returns instances of ArgumentParser and SafeConfigParser.
"""
try:
sys.argv
except AttributeError:
sys.argv = argv
# define supported command line arguments
parser = argparse.ArgumentParser(
description='Common interface for the WakaTime api.')
parser.add_argument('--file', dest='targetFile', metavar='file',
action=FileAction, required=True,
help='absolute path to file for current heartbeat')
parser.add_argument('--key', dest='key',
                        help='your wakatime api key; uses api_key from '+
                             '~/.wakatime.cfg by default')
parser.add_argument('--write', dest='isWrite',
action='store_true',
help='when set, tells api this heartbeat was triggered from '+
'writing to a file')
parser.add_argument('--plugin', dest='plugin',
help='optional text editor plugin name and version '+
'for User-Agent header')
parser.add_argument('--time', dest='timestamp', metavar='time',
type=float,
help='optional floating-point unix epoch timestamp; '+
'uses current time by default')
parser.add_argument('--lineno', dest='lineno',
help='optional line number; current line being edited')
parser.add_argument('--cursorpos', dest='cursorpos',
help='optional cursor position in the current file')
parser.add_argument('--notfile', dest='notfile', action='store_true',
help='when set, will accept any value for the file. for example, '+
'a domain name or other item you want to log time towards.')
parser.add_argument('--proxy', dest='proxy',
help='optional https proxy url; for example: '+
'https://user:pass@localhost:8080')
parser.add_argument('--project', dest='project',
help='optional project name')
parser.add_argument('--alternate-project', dest='alternate_project',
help='optional alternate project name; auto-discovered project takes priority')
    parser.add_argument('--workplace', dest='workplace',
                        help='optional workplace name; defaults to the machine hostname')
parser.add_argument('--disableoffline', dest='offline',
action='store_false',
help='disables offline time logging instead of queuing logged time')
parser.add_argument('--hidefilenames', dest='hidefilenames',
action='store_true',
help='obfuscate file names; will not send file names to api')
parser.add_argument('--exclude', dest='exclude', action='append',
help='filename patterns to exclude from logging; POSIX regex '+
'syntax; can be used more than once')
parser.add_argument('--include', dest='include', action='append',
help='filename patterns to log; when used in combination with '+
'--exclude, files matching include will still be logged; '+
'POSIX regex syntax; can be used more than once')
parser.add_argument('--ignore', dest='ignore', action='append',
help=argparse.SUPPRESS)
parser.add_argument('--logfile', dest='logfile',
help='defaults to ~/.wakatime.log')
parser.add_argument('--apiurl', dest='api_url',
help='heartbeats api url; for debugging with a local server')
parser.add_argument('--config', dest='config',
                        help='defaults to ~/.wakatime.cfg')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='turns on debug messages in log file')
parser.add_argument('--version', action='version', version=__version__)
# parse command line arguments
args = parser.parse_args(args=argv[1:])
# use current unix epoch timestamp by default
if not args.timestamp:
args.timestamp = time.time()
# parse ~/.wakatime.cfg file
configs = parseConfigFile(args.config)
if configs is None:
return args, configs
# update args from configs
if not args.key:
default_key = None
if configs.has_option('settings', 'api_key'):
default_key = configs.get('settings', 'api_key')
elif configs.has_option('settings', 'apikey'):
default_key = configs.get('settings', 'apikey')
if default_key:
args.key = default_key
else:
parser.error('Missing api key')
if not args.exclude:
args.exclude = []
if configs.has_option('settings', 'ignore'):
try:
for pattern in configs.get('settings', 'ignore').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError:
pass
if configs.has_option('settings', 'exclude'):
try:
for pattern in configs.get('settings', 'exclude').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError:
pass
if not args.include:
args.include = []
if configs.has_option('settings', 'include'):
try:
for pattern in configs.get('settings', 'include').split("\n"):
if pattern.strip() != '':
args.include.append(pattern)
except TypeError:
pass
if args.offline and configs.has_option('settings', 'offline'):
args.offline = configs.getboolean('settings', 'offline')
if not args.hidefilenames and configs.has_option('settings', 'hidefilenames'):
args.hidefilenames = configs.getboolean('settings', 'hidefilenames')
if not args.proxy and configs.has_option('settings', 'proxy'):
args.proxy = configs.get('settings', 'proxy')
if not args.verbose and configs.has_option('settings', 'verbose'):
args.verbose = configs.getboolean('settings', 'verbose')
if not args.verbose and configs.has_option('settings', 'debug'):
args.verbose = configs.getboolean('settings', 'debug')
if not args.logfile and configs.has_option('settings', 'logfile'):
args.logfile = configs.get('settings', 'logfile')
if not args.api_url and configs.has_option('settings', 'api_url'):
args.api_url = configs.get('settings', 'api_url')
return args, configs
def should_exclude(fileName, include, exclude):
if fileName is not None and fileName.strip() != '':
try:
for pattern in include:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(fileName):
return False
except re.error as ex:
log.warning(u('Regex error ({msg}) for include pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError:
pass
try:
for pattern in exclude:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(fileName):
return pattern
except re.error as ex:
log.warning(u('Regex error ({msg}) for exclude pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError:
pass
return False
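# A small illustrative check of the precedence implemented above: include
# patterns win over exclude patterns. The regexes and paths are made-up
# examples, not patterns shipped with WakaTime.
def _example_should_exclude():
    include = [r'\.py$']
    exclude = [r'^/tmp/']
    # Matches an include pattern, so it is never excluded.
    assert should_exclude('/tmp/scratch.py', include, exclude) is False
    # Matches only an exclude pattern; the matching pattern is returned.
    assert should_exclude('/tmp/notes.txt', include, exclude) == r'^/tmp/'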
def get_user_agent(plugin):
ver = sys.version_info
python_version = '%d.%d.%d.%s.%d' % (ver[0], ver[1], ver[2], ver[3], ver[4])
user_agent = u('wakatime/{ver} ({platform}) Python{py_ver}').format(
ver=u(__version__),
platform=u(platform.platform()),
py_ver=python_version,
)
if plugin:
user_agent = u('{user_agent} {plugin}').format(
user_agent=user_agent,
plugin=u(plugin),
)
else:
user_agent = u('{user_agent} Unknown/0').format(
user_agent=user_agent,
)
return user_agent
def get_hostname():
return socket.gethostname()
def send_heartbeat(project=None, branch=None, workplace=None, stats={}, key=None, targetFile=None,
timestamp=None, isWrite=None, plugin=None, offline=None, notfile=False,
hidefilenames=None, proxy=None, api_url=None, **kwargs):
"""Sends heartbeat as POST request to WakaTime api server.
"""
if not api_url:
api_url = 'https://wakatime.com/api/v1/heartbeats'
log.debug('Sending heartbeat to api at %s' % api_url)
data = {
'time': timestamp,
'entity': targetFile,
'type': 'file',
}
if hidefilenames and targetFile is not None and not notfile:
data['entity'] = data['entity'].rsplit('/', 1)[-1].rsplit('\\', 1)[-1]
if len(data['entity'].strip('.').split('.', 1)) > 1:
data['entity'] = u('HIDDEN.{ext}').format(ext=u(data['entity'].strip('.').rsplit('.', 1)[-1]))
else:
data['entity'] = u('HIDDEN')
if stats.get('lines'):
data['lines'] = stats['lines']
if stats.get('language'):
data['language'] = stats['language']
if stats.get('dependencies'):
data['dependencies'] = stats['dependencies']
if stats.get('lineno'):
data['lineno'] = stats['lineno']
if stats.get('cursorpos'):
data['cursorpos'] = stats['cursorpos']
if isWrite:
data['is_write'] = isWrite
if project:
data['project'] = project
if branch:
data['branch'] = branch
log.debug(data)
# setup api request
request_body = json.dumps(data)
api_key = u(base64.b64encode(str.encode(key) if is_py3 else key))
auth = u('Basic {api_key}').format(api_key=api_key)
headers = {
'User-Agent': get_user_agent(plugin),
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': auth,
'X-Machine-Name': workplace
}
log.debug(headers)
proxies = {}
if proxy:
proxies['https'] = proxy
# add Olson timezone to request
try:
tz = tzlocal.get_localzone()
except:
tz = None
if tz:
headers['TimeZone'] = u(tz.zone)
session_cache = SessionCache()
session = session_cache.get()
# log time to api
response = None
try:
response = session.post(api_url, data=request_body, headers=headers,
proxies=proxies)
except RequestException:
exception_data = {
sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
}
if log.isEnabledFor(logging.DEBUG):
exception_data['traceback'] = traceback.format_exc()
if offline:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if log.isEnabledFor(logging.DEBUG):
log.warn(exception_data)
else:
log.error(exception_data)
else:
response_code = response.status_code if response is not None else None
response_content = response.text if response is not None else None
if response_code == 201:
log.debug({
'response_code': response_code,
})
session_cache.save(session)
return True
if offline:
if response_code != 400:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if response_code == 401:
log.error({
'response_code': response_code,
'response_content': response_content,
})
elif log.isEnabledFor(logging.DEBUG):
log.warn({
'response_code': response_code,
'response_content': response_content,
})
else:
log.error({
'response_code': response_code,
'response_content': response_content,
})
else:
log.error({
'response_code': response_code,
'response_content': response_content,
})
session_cache.delete()
return False
def main(argv=None):
if not argv:
argv = sys.argv
args, configs = parseArguments(argv)
if configs is None:
return 103 # config file parsing error
setup_logging(args, __version__)
exclude = should_exclude(args.targetFile, args.include, args.exclude)
if exclude is not False:
log.debug(u('File not logged because matches exclude pattern: {pattern}').format(
pattern=u(exclude),
))
return 0
if os.path.isfile(args.targetFile) or args.notfile:
stats = get_file_stats(args.targetFile, notfile=args.notfile,
lineno=args.lineno, cursorpos=args.cursorpos)
project, branch = None, None
if not args.notfile:
project, branch = get_project_info(configs=configs, args=args)
workplace = args.workplace
if not args.workplace:
workplace = get_hostname()
kwargs = vars(args)
kwargs['project'] = project
kwargs['branch'] = branch
kwargs['stats'] = stats
kwargs['workplace'] = workplace
if send_heartbeat(**kwargs):
queue = Queue()
while True:
heartbeat = queue.pop()
if heartbeat is None:
break
sent = send_heartbeat(
project=heartbeat['project'],
targetFile=heartbeat['file'],
timestamp=heartbeat['time'],
branch=heartbeat['branch'],
workplace=heartbeat['workplace'],
stats=json.loads(heartbeat['stats']),
key=args.key,
isWrite=heartbeat['is_write'],
plugin=heartbeat['plugin'],
offline=args.offline,
hidefilenames=args.hidefilenames,
notfile=args.notfile,
proxy=args.proxy,
api_url=args.api_url,
)
if not sent:
break
return 0 # success
return 102 # api error
else:
log.debug('File does not exist; ignoring this heartbeat.')
return 0
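# A hedged sketch of calling the entry point above programmatically; the file
# path, API key, and plugin string are placeholders, and a real call needs the
# file to exist plus network access (or offline queueing) to record anything.
def _example_invoke():
    return main(['wakatime', '--file', '/tmp/example.py',
                 '--key', 'YOUR_API_KEY', '--plugin', 'example-plugin/1.0'])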
|
|
# (C) Datadog, Inc. 2010-2016
# (C) Jon Glick <[email protected]> 2014
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
# 3rd party
from boto.s3.connection import S3Connection
import simplejson as json
# project
from checks import AgentCheck
from config import _is_affirmative
def multidict(ordered_pairs):
"""Convert duplicate keys values to lists."""
# read all values into lists
d = defaultdict(list)
for k, v in ordered_pairs:
d[k].append(v)
# unpack lists that have only 1 item
for k, v in d.items():
if len(v) == 1:
d[k] = v[0]
return dict(d)
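# A minimal usage sketch of the decoder hook: object_pairs_hook lets multidict()
# merge duplicate keys instead of silently dropping them, which is what the
# pre-2.1 Riak CS stats payload needs. The JSON literal below is illustrative.
def _example_multidict_decode():
    raw = '{"a": 1, "a": 2, "b": 3}'
    decoded = json.JSONDecoder(object_pairs_hook=multidict).decode(raw)
    # Duplicate "a" keys collapse into a list; unique keys stay scalar.
    return decoded  # {"a": [1, 2], "b": 3}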
class RiakCs(AgentCheck):
STATS_BUCKET = 'riak-cs'
STATS_KEY = 'stats'
SERVICE_CHECK_NAME = 'riakcs.can_connect'
def check(self, instance):
s3, aggregation_key, tags, metrics = self._connect(instance)
stats = self._get_stats(s3, aggregation_key)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=["aggregation_key:{0}".format(aggregation_key)])
self.process_stats(stats, tags, metrics)
def process_stats(self, stats, tags, metrics):
if not stats:
raise Exception("No stats were collected")
if "legend" not in stats:
# riak cs 2.1+ stats format
if metrics:
metrics = set(metrics)
metrics.update(V21_DEFAULT_METRICS)
else:
metrics = V21_DEFAULT_METRICS
for key, value in stats.iteritems():
if key not in metrics:
continue
suffix = key.rsplit("_", 1)[-1]
method = STATS_METHODS.get(suffix, "gauge")
getattr(self, method)("riakcs.{}".format(key), value, tags=tags)
else:
# pre 2.1 stats format
legends = dict([(len(k), k) for k in stats["legend"]])
del stats["legend"]
for key, values in stats.iteritems():
legend = legends[len(values)]
for i, value in enumerate(values):
metric_name = "riakcs.{0}.{1}".format(key, legend[i])
self.gauge(metric_name, value, tags=tags)
def _connect(self, instance):
for e in ("access_id", "access_secret"):
if e not in instance:
raise Exception("{0} parameter is required.".format(e))
s3_settings = {
"aws_access_key_id": instance.get('access_id', None),
"aws_secret_access_key": instance.get('access_secret', None),
"proxy": instance.get('host', 'localhost'),
"proxy_port": int(instance.get('port', 8080)),
"is_secure": _is_affirmative(instance.get('is_secure', True))
}
if instance.get('s3_root'):
s3_settings['host'] = instance['s3_root']
aggregation_key = s3_settings['proxy'] + ":" + str(s3_settings['proxy_port'])
try:
s3 = S3Connection(**s3_settings)
except Exception as e:
self.log.error("Error connecting to {0}: {1}".format(aggregation_key, e))
self.service_check(
self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=["aggregation_key:{0}".format(aggregation_key)],
message=str(e))
raise
tags = instance.get("tags", [])
tags.append("aggregation_key:{0}".format(aggregation_key))
metrics = instance.get("metrics", [])
return s3, aggregation_key, tags, metrics
def _get_stats(self, s3, aggregation_key):
try:
bucket = s3.get_bucket(self.STATS_BUCKET, validate=False)
key = bucket.get_key(self.STATS_KEY)
stats_str = key.get_contents_as_string()
stats = self.load_json(stats_str)
except Exception as e:
self.log.error("Error retrieving stats from {0}: {1}".format(aggregation_key, e))
self.service_check(
self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=["aggregation_key:{0}".format(aggregation_key)],
message=str(e))
raise
return stats
@classmethod
def load_json(cls, text):
data = json.loads(text)
if "legend" in data:
# riak cs before v2.1 had duplicate keys
data = json.JSONDecoder(object_pairs_hook=multidict).decode(text)
return data
STATS_METHODS = {
"one": "count",
}
# This list includes most S3 API metrics as well as memory stats. Some
# have been excluded, mainly just to keep size of the default set of
# metrics somewhat reasonable.
#
# Excluded S3 metrics:
# - bucket_acl_(get|put)
# - object_acl_(get|put)
# - bucket_policy_(get|put|delete)
# - *_in_(one|total)
# - *_time_error_*
# - *_time_100
#
# Any of these excluded metrics in addition to many others (there are
# over 1000 to choose from) can be added by specifying them in the
# riakcs.yaml config file under the "metrics" key in the instance
# config; the value should be a list of metric names.
#
# Helpful references:
# - https://github.com/basho/riak_cs/wiki/Riak-cs-and-stanchion-metrics
V21_DEFAULT_METRICS = set([
"memory_atom",
"memory_atom_used",
"memory_binary",
"memory_code",
"memory_ets",
"memory_processes",
"memory_processes_used",
"memory_system",
"memory_total",
"service_get_out_error_one",
"service_get_out_error_total",
"service_get_out_one",
"service_get_out_total",
"service_get_time_95",
"service_get_time_99",
"service_get_time_mean",
"service_get_time_median",
"bucket_delete_out_error_one",
"bucket_delete_out_error_total",
"bucket_delete_out_one",
"bucket_delete_out_total",
"bucket_delete_time_95",
"bucket_delete_time_99",
"bucket_delete_time_mean",
"bucket_delete_time_median",
"bucket_head_out_error_one",
"bucket_head_out_error_total",
"bucket_head_out_one",
"bucket_head_out_total",
"bucket_head_time_95",
"bucket_head_time_99",
"bucket_head_time_mean",
"bucket_head_time_median",
"bucket_put_out_error_one",
"bucket_put_out_error_total",
"bucket_put_out_one",
"bucket_put_out_total",
"bucket_put_time_95",
"bucket_put_time_99",
"bucket_put_time_mean",
"bucket_put_time_median",
"bucket_location_get_out_error_one",
"bucket_location_get_out_error_total",
"bucket_location_get_out_one",
"bucket_location_get_out_total",
"bucket_location_get_time_95",
"bucket_location_get_time_99",
"bucket_location_get_time_mean",
"bucket_location_get_time_median",
"list_uploads_get_out_error_one",
"list_uploads_get_out_error_total",
"list_uploads_get_out_one",
"list_uploads_get_out_total",
"list_uploads_get_time_95",
"list_uploads_get_time_99",
"list_uploads_get_time_mean",
"list_uploads_get_time_median",
"multiple_delete_post_out_error_one",
"multiple_delete_post_out_error_total",
"multiple_delete_post_out_one",
"multiple_delete_post_out_total",
"multiple_delete_post_time_95",
"multiple_delete_post_time_99",
"multiple_delete_post_time_mean",
"multiple_delete_post_time_median",
"list_objects_get_out_error_one",
"list_objects_get_out_error_total",
"list_objects_get_out_one",
"list_objects_get_out_total",
"list_objects_get_time_95",
"list_objects_get_time_99",
"list_objects_get_time_mean",
"list_objects_get_time_median",
"object_put_out_error_one",
"object_put_out_error_total",
"object_put_out_one",
"object_put_out_total",
"object_put_time_95",
"object_put_time_99",
"object_put_time_mean",
"object_put_time_median",
"object_delete_out_error_one",
"object_delete_out_error_total",
"object_delete_out_one",
"object_delete_out_total",
"object_delete_time_95",
"object_delete_time_99",
"object_delete_time_mean",
"object_delete_time_median",
"object_get_out_error_one",
"object_get_out_error_total",
"object_get_out_one",
"object_get_out_total",
"object_get_time_95",
"object_get_time_99",
"object_get_time_mean",
"object_get_time_median",
"object_head_out_error_one",
"object_head_out_error_total",
"object_head_out_one",
"object_head_out_total",
"object_head_time_95",
"object_head_time_99",
"object_head_time_mean",
"object_head_time_median",
"object_put_copy_out_error_one",
"object_put_copy_out_error_total",
"object_put_copy_out_one",
"object_put_copy_out_total",
"object_put_copy_time_95",
"object_put_copy_time_99",
"object_put_copy_time_mean",
"object_put_copy_time_median",
"multipart_post_out_error_one",
"multipart_post_out_error_total",
"multipart_post_out_one",
"multipart_post_out_total",
"multipart_post_time_95",
"multipart_post_time_99",
"multipart_post_time_mean",
"multipart_post_time_median",
"multipart_upload_delete_out_error_one",
"multipart_upload_delete_out_error_total",
"multipart_upload_delete_out_one",
"multipart_upload_delete_out_total",
"multipart_upload_delete_time_95",
"multipart_upload_delete_time_99",
"multipart_upload_delete_time_mean",
"multipart_upload_delete_time_median",
"multipart_upload_get_out_error_one",
"multipart_upload_get_out_error_total",
"multipart_upload_get_out_one",
"multipart_upload_get_out_total",
"multipart_upload_get_time_95",
"multipart_upload_get_time_99",
"multipart_upload_get_time_mean",
"multipart_upload_get_time_median",
"multipart_upload_post_out_error_one",
"multipart_upload_post_out_error_total",
"multipart_upload_post_out_one",
"multipart_upload_post_out_total",
"multipart_upload_post_time_95",
"multipart_upload_post_time_99",
"multipart_upload_post_time_mean",
"multipart_upload_post_time_median",
"multipart_upload_put_out_error_one",
"multipart_upload_put_out_error_total",
"multipart_upload_put_out_one",
"multipart_upload_put_out_total",
"multipart_upload_put_time_95",
"multipart_upload_put_time_99",
"multipart_upload_put_time_mean",
"multipart_upload_put_time_median",
])
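# A hedged sketch of a parsed riakcs.yaml instance as the check receives it; the
# credentials and endpoint are placeholders, and the extra metric names are
# illustrative rather than a guaranteed part of the Riak CS stats payload.
EXAMPLE_INSTANCE = {
    "access_id": "AKIAEXAMPLE",
    "access_secret": "SECRETEXAMPLE",
    "host": "localhost",
    "port": 8080,
    "is_secure": False,
    "tags": ["env:example"],
    # Metrics beyond V21_DEFAULT_METRICS can be opted into here.
    "metrics": ["bucket_acl_get_out_one", "object_acl_put_out_one"],
}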
|
|
#
# Copyright 2015-2016 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wskutil import addAuthenticatedCommand, apiBase, bold, request, responseError, parseQName, getQName, getPrettyJson, getParameterNamesFromAnnotations, getDescriptionFromAnnotations
import urllib
import abc
import json
import httplib
import sys
#
# Common superclass for action, trigger, and rule CLI commands.
# All of these share some common CRUD CLI commands, defined here.
#
class Item:
__metaclass__ = abc.ABCMeta
name = False
collection = False
# @param name: the singular form of the noun -- used on the command line
    # @param collection: the plural form of the noun -- used in REST APIs
def __init__(self, name, collection):
self.name = name
self.collection = collection
def getCommands(self, parser, props):
commands = parser.add_parser(self.name, help='work with %s' % self.collection)
subcmds = commands.add_subparsers(title='available commands', dest='subcmd')
self.getItemSpecificCommands(subcmds, props)
# default commands are get, delete and list
def addDefaultCommands(self, subcmds, props, which = ['get', 'delete', 'list']):
if ('get' in which):
subcmd = subcmds.add_parser('get', help='get %s' % self.name)
subcmd.add_argument('name', help='the name of the %s' % self.name)
subcmd.add_argument('project', nargs='?', help='project only this property')
addAuthenticatedCommand(subcmd, props)
subcmd.add_argument('-s', '--summary', help='summarize entity details', action='store_true')
if ('delete' in which):
subcmd = subcmds.add_parser('delete', help='delete %s' % self.name)
subcmd.add_argument('name', help='the name of the %s' % self.name)
addAuthenticatedCommand(subcmd, props)
if ('list' in which):
subcmd = subcmds.add_parser('list', help='list all %s' % self.collection)
subcmd.add_argument('name', nargs='?', help='the namespace to list')
addAuthenticatedCommand(subcmd, props)
subcmd.add_argument('-s', '--skip', help='skip this many entities from the head of the collection', type=int, default=0)
subcmd.add_argument('-l', '--limit', help='only return this many entities from the collection', type=int, default=30)
def cmd(self, args, props):
if args.subcmd == 'create':
return self.create(args, props, False)
elif args.subcmd == 'update':
return self.create(args, props, True)
elif args.subcmd == 'get':
return self.get(args, props)
elif args.subcmd == 'list':
return self.list(args, props)
elif args.subcmd == 'delete':
return self.delete(args, props)
else:
print 'error: unexpected sub command'
return 2
@abc.abstractmethod
def getItemSpecificCommands(self, parser, props):
"""add command parsers specific to item"""
return parser
@abc.abstractmethod
def create(self, args, props, update):
"""creates item"""
return 2
# Return summary string of an entity.
def getEntitySummary(self, entity, includeParams = True, kind = None, namespace = None):
kind = self.name if kind is None else kind
namespace = entity['namespace'] if 'namespace' in entity else namespace
fullName = getQName(entity['name'], namespace)
annotations = entity['annotations']
description = getDescriptionFromAnnotations(annotations)
summary = '%s %s' % (bold(kind), fullName)
if description:
summary += ': %s' % (description)
if includeParams:
parameterNames = getParameterNamesFromAnnotations(annotations)
if parameterNames:
summary += '\n (%s: %s)' % (bold('params'), ' '.join(parameterNames))
if 'actions' in entity:
for a in entity['actions']:
actionSummary = self.getEntitySummary(a, False, 'action', fullName)
summary += '\n %s' % (actionSummary)
if 'feeds' in entity:
for a in entity['feeds']:
actionSummary = self.getEntitySummary(a, False, 'feed ', fullName)
summary += '\n %s' % (actionSummary)
return summary
# allows "get" response to be post processed before rendering
def postProcessGet(self, entity):
return entity
# allows "delete" pre-processing, override as needed
def preProcessDelete(self, args, props):
return 0
def put(self, args, props, update, payload):
res = self.httpPut(args, props, update, payload)
return self.putResponse(res, update)
def get(self, args, props):
res = self.httpGet(args, props)
if res.status == httplib.OK:
result = self.postProcessGet(json.loads(res.read()))
if args.summary:
summary = self.getEntitySummary(result)
print summary
elif args.project:
if args.project in result:
print 'ok: got %(item)s %(name)s, projecting %(p)s' % {'item': self.name, 'name': args.name, 'p': args.project }
print getPrettyJson(result[args.project])
return 0
else:
print 'ok: got %(item)s %(name)s, but it does not contain property %(p)s' % {'item': self.name, 'name': args.name, 'p': args.project }
return 148
else:
print 'ok: got %(item)s %(name)s' % {'item': self.name, 'name': args.name }
print getPrettyJson(result)
return 0
else:
return responseError(res)
def delete(self, args, props):
res = self.httpDelete(args, props)
return self.deleteResponse(args, res)
def list(self, args, props):
namespace, pname = parseQName(args.name, props)
if pname:
pname = ('/%s' % pname) if pname.endswith('/') else '/%s/' % pname
url = '%(apibase)s/namespaces/%(namespace)s/%(collection)s%(package)s?skip=%(skip)s&limit=%(limit)s%(public)s' % {
'apibase': apiBase(props),
'namespace': urllib.quote(namespace),
'collection': self.collection,
'package': pname if pname else '',
'skip': args.skip,
'limit': args.limit,
'public': '&public=true' if 'shared' in args and args.shared else ''
}
res = request('GET', url, auth=args.auth, verbose=args.verbose)
if res.status == httplib.OK:
result = json.loads(res.read())
print bold(self.collection)
for e in result:
print self.formatListEntity(e)
return 0
else:
return responseError(res)
# returns the HTTP response for saving an item.
def httpPut(self, args, props, update, payload):
namespace, pname = parseQName(args.name, props)
url = '%(apibase)s/namespaces/%(namespace)s/%(collection)s/%(name)s%(update)s' % {
'apibase': apiBase(props),
'namespace': urllib.quote(namespace),
'collection': self.collection,
'name': self.getSafeName(pname),
'update': '?overwrite=true' if update else ''
}
headers= {
'Content-Type': 'application/json'
}
res = request('PUT', url, payload, headers, auth=args.auth, verbose=args.verbose)
return res
# returns the HTTP response of getting an item.
def httpGet(self, args, props, name = None):
if name is None:
name = args.name
namespace, pname = parseQName(name, props)
if pname is None or pname.strip() == '':
            print 'error: entity name missing; did you mean to list the collection?'
sys.exit(2)
url = '%(apibase)s/namespaces/%(namespace)s/%(collection)s/%(name)s' % {
'apibase': apiBase(props),
'namespace': urllib.quote(namespace),
'collection': self.collection,
'name': self.getSafeName(pname)
}
return request('GET', url, auth=args.auth, verbose=args.verbose)
# returns the HTTP response for deleting an item.
def httpDelete(self, args, props):
code = self.preProcessDelete(args, props)
if (code != 0):
return code
namespace, pname = parseQName(args.name, props)
url = '%(apibase)s/namespaces/%(namespace)s/%(collection)s/%(name)s' % {
'apibase': apiBase(props),
'namespace': urllib.quote(namespace),
'collection': self.collection,
'name': self.getSafeName(pname)
}
res = request('DELETE', url, auth=args.auth, verbose=args.verbose)
return res
# processes delete response and emit console message
def deleteResponse(self, args, res):
if res.status == httplib.OK:
print 'ok: deleted %(name)s' % {'name': args.name }
return 0
else:
return responseError(res)
# process put response and emit console message
def putResponse(self, res, update):
if res.status == httplib.OK:
result = json.loads(res.read())
print 'ok: %(mode)s %(item)s %(name)s' % {
'mode': 'updated' if update else 'created',
'item': self.name,
'name': result['name']
}
return 0
else:
return responseError(res)
# returns a name escaped so it can be used in a url.
def getSafeName(self, name):
safeChars = '@:./'
return urllib.quote(name, safeChars)
# adds publish parameter to payloads
def addPublish(self, payload, args):
if args.shared != None and not ('update' in args and args.update):
payload['publish'] = True if args.shared == 'yes' else False
# formats an entity for printing in a list
def formatListEntity(self, e):
ns = e['namespace']
name = getQName(e['name'], ns)
return '{:<65} {:<8}'.format(name, 'shared' if (e['publish'] or e['publish'] == 'true') else 'private')
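# A minimal sketch of how a concrete subclass fills in the abstract hooks above;
# "widget"/"widgets" are illustrative nouns, not entities of the real wsk CLI.
class ExampleWidget(Item):
    def __init__(self):
        Item.__init__(self, 'widget', 'widgets')
    def getItemSpecificCommands(self, subcmds, props):
        # Reuse the shared get/delete/list sub-commands defined by Item.
        self.addDefaultCommands(subcmds, props)
        return subcmds
    def create(self, args, props, update):
        # This sketch does not support create/update.
        return 2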
|
|
"""Provides abstraction for couchbase objects."""
from couchbase.bucket import Bucket as CouchbaseBucket
from couchbase.exceptions import CouchbaseError, NotFoundError
import lib
from collections import OrderedDict
class CB(object):
"""Couchbase Abstraction Object."""
def __init__(self, override_config=None):
"""
Initialise the Couchbase object.
If an override dictionary is passed then use that, otherwise use the
defaults.
:param override_config:
"""
if override_config:
self.cbbucket = override_config.get('bucket')
self.cburl = override_config.get("url")
self.cbpassword = override_config.get("password")
else:
self.cbbucket = lib.options.get("couchbase", "bucket")
self.cburl = lib.options.get("couchbase", "url")
self.cbpassword = lib.options.get("couchbase", "password")
self.cb = None
def create_connection(self):
"""Create a connection to Couchbase."""
try:
self.cb = CouchbaseBucket(connection_string=self.cburl,
password=self.cbpassword)
except:
raise CB_Connection_Exception
def save(self, key, value, cas=None, **kwargs):
"""
Save a couchbase document.
:param key:
:param value:
:param cas:
:return:
"""
if not self.cb:
self.create_connection()
if cas is not None:
return self.cb.set(key, value, cas=cas)
else:
return self.cb.set(key, value)
def save_multi(self, data, **kwargs):
"""
Save multiple couchbase documents.
:param data:
:param kwargs:
:return:
"""
if not self.cb:
self.create_connection()
return self.cb.upsert_multi(data, **kwargs)
def fetch(self, key):
"""
Fetch a document by key.
:param key:
:return:
"""
if not self.cb:
self.create_connection()
try:
result = self.cb.get(key)
return result
except NotFoundError:
return False
except CouchbaseError:
return False
def fetch_multi(self, keys):
"""
Fetch multiple documents based on a list of keys.
:param keys:
:return:
"""
if not self.cb:
self.create_connection()
try:
result = self.cb.get_multi(keys)
return result
except NotFoundError:
return False
except CouchbaseError:
return False
def delete(self, key):
"""
Delete a specified document.
:param key:
:return:
"""
if not self.cb:
self.create_connection()
try:
result = self.cb.delete(key)
return result
except CouchbaseError:
return False
def fetch_view(self, doc, view, **kwargs):
"""
Fetch a view.
:param doc:
:param view:
        :param kwargs: additional view query options
:return:
"""
if not self.cb:
self.create_connection()
try:
result = self.cb.query(doc, view, **kwargs)
return result
except CouchbaseError:
raise
def execute_query(self, query, single_result=False, additional_creds=None):
"""
Execute a N1QL query.
:param query: N1QLQuery object
:param single_result:
:param additional_creds: List containing dictionaries of additional
credentials for cross-bucket joins.
:return:
"""
if not self.cb:
self.create_connection()
try:
credentials = [{
"user": "local:" + self.cbbucket,
"pass": self.cbpassword
}]
if additional_creds:
credentials += additional_creds
query.set_option("creds", credentials)
query.set_option("max_parallelism", "0")
if single_result:
result = self.cb.n1ql_query(query).get_single_result()
else:
result = self.cb.n1ql_query(query)
return result
except CouchbaseError:
raise
def mutate(self, key, *args):
"""
Mutate a document using the Couchbase Subdocument API.
:param key: The Couchbase Document ID to mutate
:param args: One or more mutations to make against the document
:return:
"""
if not self.cb:
self.create_connection()
try:
return self.cb.mutate_in(key, *args)
except CouchbaseError:
raise
def fts_search(self, index_name, query, fields=None, highlight_fields=None,
highlight_style='html', limit=None, offset=None,
facets=None):
"""
Search using FTS.
:param index_name: The name of the FTS index against which the search
is run.
:param query:
:param fields:
:param highlight_fields:
:param highlight_style:
:param limit:
:param offset:
:param facets:
:return:
"""
if self.cb is None:
self.create_connection()
try:
params = {
'highlight_style': highlight_style
}
if limit is not None:
params['limit'] = limit
if offset is not None:
params['skip'] = offset
if fields is not None:
params['fields'] = fields
if highlight_fields is not None:
params['highlight_fields'] = highlight_fields
if facets is not None:
params['facets'] = facets
return self.cb.search(index_name, query, **params)
except CouchbaseError:
raise
def process_fts_result(self, result):
"""
Run through an FTS result set.
Fetches the documents and stores them in an ordered dictionary.
:param result:
:return: OrderedDict
"""
if self.cb is None:
self.create_connection()
processed_results = {
'total_hits': 0,
'rows': OrderedDict()
}
ids_to_fetch = []
for row in result:
ids_to_fetch.append(row['id'])
processed_results['rows'][row['id']] = row
processed_results['total_hits'] = result.total_hits
docs = self.fetch_multi(ids_to_fetch)
if docs is False:
return processed_results
for key, doc in docs.items():
processed_results['rows'][key]['doc'] = doc.value
return processed_results
class CB_Connection_Exception(Exception):
pass
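# A hedged usage sketch, assuming a reachable Couchbase cluster; the bucket
# name, connection string, password, and document key below are placeholders.
def _example_cb_roundtrip():
    cb = CB(override_config={
        'bucket': 'example-bucket',
        'url': 'couchbase://localhost/example-bucket',
        'password': 'secret',
    })
    cb.save('greeting::1', {'message': 'hello'})
    return cb.fetch('greeting::1')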
|
|
# Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Nathan Binkert
# Andrew Bardsley
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from BaseCPU import BaseCPU
from DummyChecker import DummyChecker
from BranchPredictor import *
from TimingExpr import TimingExpr
from FuncUnit import OpClass
class MinorOpClass(SimObject):
"""Boxing of OpClass to get around build problems and provide a hook for
future additions to OpClass checks"""
type = 'MinorOpClass'
cxx_header = "cpu/minor/func_unit.hh"
opClass = Param.OpClass("op class to match")
class MinorOpClassSet(SimObject):
"""A set of matchable op classes"""
type = 'MinorOpClassSet'
cxx_header = "cpu/minor/func_unit.hh"
opClasses = VectorParam.MinorOpClass([], "op classes to be matched."
" An empty list means any class")
class MinorFUTiming(SimObject):
type = 'MinorFUTiming'
cxx_header = "cpu/minor/func_unit.hh"
mask = Param.UInt64(0, "mask for testing ExtMachInst")
match = Param.UInt64(0, "match value for testing ExtMachInst:"
" (ext_mach_inst & mask) == match")
suppress = Param.Bool(False, "if true, this inst. is not executed by"
" this FU")
extraCommitLat = Param.Cycles(0, "extra cycles to stall commit for"
" this inst.")
extraCommitLatExpr = Param.TimingExpr(NULL, "extra cycles as a"
" run-time evaluated expression")
extraAssumedLat = Param.Cycles(0, "extra cycles to add to scoreboard"
" retire time for this insts dest registers once it leaves the"
" functional unit. For mem refs, if this is 0, the result's time"
" is marked as unpredictable and no forwarding can take place.")
srcRegsRelativeLats = VectorParam.Cycles("the maximum number of cycles"
" after inst. issue that each src reg can be available for this"
" inst. to issue")
opClasses = Param.MinorOpClassSet(MinorOpClassSet(),
"op classes to be considered for this decode. An empty set means any"
" class")
description = Param.String('', "description string of the decoding/inst."
" class")
def minorMakeOpClassSet(op_classes):
"""Make a MinorOpClassSet from a list of OpClass enum value strings"""
def boxOpClass(op_class):
return MinorOpClass(opClass=op_class)
return MinorOpClassSet(opClasses=map(boxOpClass, op_classes))
class MinorFU(SimObject):
type = 'MinorFU'
cxx_header = "cpu/minor/func_unit.hh"
opClasses = Param.MinorOpClassSet(MinorOpClassSet(), "type of operations"
" allowed on this functional unit")
opLat = Param.Cycles(1, "latency in cycles")
issueLat = Param.Cycles(1, "cycles until another instruction can be"
" issued")
timings = VectorParam.MinorFUTiming([], "extra decoding rules")
cantForwardFromFUIndices = VectorParam.Unsigned([],
"list of FU indices from which this FU can't receive and early"
" (forwarded) result")
class MinorFUPool(SimObject):
type = 'MinorFUPool'
cxx_header = "cpu/minor/func_unit.hh"
funcUnits = VectorParam.MinorFU("functional units")
class MinorDefaultIntFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntAlu'])
timings = [MinorFUTiming(description="Int",
srcRegsRelativeLats=[2])]
opLat = 3
class MinorDefaultIntMulFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntMult'])
timings = [MinorFUTiming(description='Mul',
srcRegsRelativeLats=[0])]
opLat = 3
class MinorDefaultIntDivFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntDiv'])
issueLat = 9
opLat = 9
class MinorDefaultFloatSimdFU(MinorFU):
opClasses = minorMakeOpClassSet([
'FloatAdd', 'FloatCmp', 'FloatCvt', 'FloatMult', 'FloatDiv',
'FloatSqrt',
'SimdAdd', 'SimdAddAcc', 'SimdAlu', 'SimdCmp', 'SimdCvt',
'SimdMisc', 'SimdMult', 'SimdMultAcc', 'SimdShift', 'SimdShiftAcc',
'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', 'SimdFloatCmp',
'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', 'SimdFloatMult',
'SimdFloatMultAcc', 'SimdFloatSqrt'])
timings = [MinorFUTiming(description='FloatSimd',
srcRegsRelativeLats=[2])]
opLat = 6
class MinorDefaultMemFU(MinorFU):
opClasses = minorMakeOpClassSet(['MemRead', 'MemWrite'])
timings = [MinorFUTiming(description='Mem',
srcRegsRelativeLats=[1], extraAssumedLat=2)]
opLat = 1
class MinorDefaultMiscFU(MinorFU):
opClasses = minorMakeOpClassSet(['IprAccess', 'InstPrefetch'])
opLat = 1
class MinorDefaultFUPool(MinorFUPool):
funcUnits = [MinorDefaultIntFU(), MinorDefaultIntFU(),
MinorDefaultIntMulFU(), MinorDefaultIntDivFU(),
MinorDefaultFloatSimdFU(), MinorDefaultMemFU(),
MinorDefaultMiscFU()]
class MinorCPU(BaseCPU):
type = 'MinorCPU'
cxx_header = "cpu/minor/cpu.hh"
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def require_caches(cls):
return True
@classmethod
def support_take_over(cls):
return True
fetch1FetchLimit = Param.Unsigned(1,
"Number of line fetches allowable in flight at once")
fetch1LineSnapWidth = Param.Unsigned(0,
"Fetch1 'line' fetch snap size in bytes"
" (0 means use system cache line size)")
fetch1LineWidth = Param.Unsigned(0,
"Fetch1 maximum fetch size in bytes (0 means use system cache"
" line size)")
fetch1ToFetch2ForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch1 to Fetch2 (1 means next cycle)")
fetch1ToFetch2BackwardDelay = Param.Cycles(1,
"Backward cycle delay from Fetch2 to Fetch1 for branch prediction"
" signalling (0 means in the same cycle, 1 mean the next cycle)")
fetch2InputBufferSize = Param.Unsigned(2,
"Size of input buffer to Fetch2 in cycles-worth of insts.")
fetch2ToDecodeForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch2 to Decode (1 means next cycle)")
fetch2CycleInput = Param.Bool(True,
"Allow Fetch2 to cross input lines to generate full output each"
" cycle")
decodeInputBufferSize = Param.Unsigned(3,
"Size of input buffer to Decode in cycles-worth of insts.")
decodeToExecuteForwardDelay = Param.Cycles(1,
"Forward cycle delay from Decode to Execute (1 means next cycle)")
decodeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Decode (and implicitly"
" Decode's own width)")
decodeCycleInput = Param.Bool(True,
"Allow Decode to pack instructions from more than one input cycle"
" to fill its output each cycle")
executeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Execute")
executeCycleInput = Param.Bool(True,
"Allow Execute to use instructions from more than one input cycle"
" each cycle")
executeIssueLimit = Param.Unsigned(2,
"Number of issuable instructions in Execute each cycle")
executeMemoryIssueLimit = Param.Unsigned(1,
"Number of issuable memory instructions in Execute each cycle")
executeCommitLimit = Param.Unsigned(2,
"Number of committable instructions in Execute each cycle")
executeMemoryCommitLimit = Param.Unsigned(1,
"Number of committable memory references in Execute each cycle")
executeInputBufferSize = Param.Unsigned(7,
"Size of input buffer to Execute in cycles-worth of insts.")
executeMemoryWidth = Param.Unsigned(0,
"Width (and snap) in bytes of the data memory interface. (0 mean use"
" the system cacheLineSize)")
executeMaxAccessesInMemory = Param.Unsigned(2,
"Maximum number of concurrent accesses allowed to the memory system"
" from the dcache port")
executeLSQMaxStoreBufferStoresPerCycle = Param.Unsigned(2,
"Maximum number of stores that the store buffer can issue per cycle")
executeLSQRequestsQueueSize = Param.Unsigned(1,
"Size of LSQ requests queue (address translation queue)")
executeLSQTransfersQueueSize = Param.Unsigned(2,
"Size of LSQ transfers queue (memory transaction queue)")
executeLSQStoreBufferSize = Param.Unsigned(5,
"Size of LSQ store buffer")
executeBranchDelay = Param.Cycles(1,
"Delay from Execute deciding to branch and Fetch1 reacting"
" (1 means next cycle)")
executeFuncUnits = Param.MinorFUPool(MinorDefaultFUPool(),
"FUlines for this processor")
executeSetTraceTimeOnCommit = Param.Bool(True,
"Set inst. trace times to be commit times")
executeSetTraceTimeOnIssue = Param.Bool(False,
"Set inst. trace times to be issue times")
executeAllowEarlyMemoryIssue = Param.Bool(True,
"Allow mem refs to be issued to the LSQ before reaching the head of"
" the in flight insts queue")
enableIdling = Param.Bool(True,
"Enable cycle skipping when the processor is idle\n");
branchPred = Param.BranchPredictor(TournamentBP(
numThreads = Parent.numThreads), "Branch Predictor")
def addCheckerCpu(self):
print "Checker not yet supported by MinorCPU"
exit(1)
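# A hedged sketch of composing the parameter classes above into a custom
# functional-unit pool; the latency and the unit mix are illustrative choices,
# not tuned gem5 defaults.
class ExampleSlowIntFU(MinorFU):
    opClasses = minorMakeOpClassSet(['IntAlu'])
    opLat = 4
class ExampleFUPool(MinorFUPool):
    funcUnits = [ExampleSlowIntFU(), MinorDefaultIntMulFU(),
                 MinorDefaultMemFU(), MinorDefaultMiscFU()]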
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (
graph.as_graph_def(add_shapes=True) if isinstance(graph, ops.Graph)
else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(
graph_def=graph_def or maybe_graph_as_def))
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
@{tf.Session.run} or
@{tf.Tensor.eval}, to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
This method wraps the provided session in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
self._write_plugin_assets(graph)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def _write_plugin_assets(self, graph):
plugin_assets = plugin_asset.get_all_plugin_assets(graph)
logdir = self.event_writer.get_logdir()
for asset_container in plugin_assets:
plugin_name = asset_container.plugin_name
plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
gfile.MakeDirs(plugin_dir)
assets = asset_container.assets()
for (asset_name, content) in assets.items():
asset_path = os.path.join(plugin_dir, asset_name)
with gfile.Open(asset_path, "w") as f:
f.write(content)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
      TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s"
% type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
def add_run_metadata(self, run_metadata, tag, global_step=None):
"""Adds a metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
"""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None):
"""Creates a `FileWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
The other arguments to the constructor control the asynchronous writes to
the event file:
* `flush_secs`: How often, in seconds, to flush the added summaries
and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
written to disk before one of the 'add' calls block.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
event_writer = EventFileWriter(logdir, max_queue, flush_secs)
super(FileWriter, self).__init__(event_writer, graph, graph_def)
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
self.event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
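# A small hedged sketch of driving FileWriter directly with a hand-built
# Summary proto; the log directory and tag name are placeholders.
def _example_write_scalar(logdir="/tmp/example-logs"):
    writer = FileWriter(logdir)
    summary = summary_pb2.Summary(
        value=[summary_pb2.Summary.Value(tag="example/loss", simple_value=0.5)])
    writer.add_summary(summary, global_step=1)
    writer.close()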
|
|
from TASSELpy.java.lang.Comparable import Comparable
from TASSELpy.java.lang.Number import Number, metaNumber
from TASSELpy.java.lang.Integer import Integer, metaInteger
from TASSELpy.java.lang.String import metaString
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload
from TASSELpy.utils.helper import make_sig
from TASSELpy.javaObj import javaObj
from TASSELpy.utils.DocInherit import DocInherit
from abc import ABCMeta
import numpy as np
java_imports = {'Byte':'java/lang/Byte',
'String':'java/lang/String'}
class metaByte:
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if C == np.int8:
return True
elif C == np.uint8:
return True
elif issubclass(C, Byte):
return True
else:
return False
## Class for wrapping java type Byte
class Byte(Comparable, Number):
"""
public final class Byte
extends Number
implements Comparable<Byte>
The Byte class wraps a value of primitive type byte in an object.
An object of type Byte contains a single field whose type is byte.
In addition, this class provides several methods for converting a
byte to a String and a String to a byte, as well as other constants and
methods useful when dealing with a byte.
"""
_java_name = java_imports['Byte']
@javaConstructorOverload(java_imports['Byte'],
(make_sig(['byte'],'void'),(metaByte,)),
(make_sig([java_imports['String']],'void'),(metaString,)))
def __init__(self, *args, **kwargs):
"""
Instantiates a Byte object
Signatures:
Byte(byte value)
Byte(String s)
Arguments:
Byte(byte value)
value -- a byte value
Byte(String s)
s -- the string to be converted to a Byte
"""
super(Byte, self).__init__(*args, generic=(Byte,), **kwargs)
@DocInherit
@javaOverload("compareTo",
(make_sig([java_imports['Byte']],'int'),(metaByte,),None))
def compareTo(self, *args):
pass
def __repr__(self):
return "Byte(%d)" % self.byteValue()
###################################
## Numeric magic methods
###################################
def __pos__(self):
return Byte(+self.toPrimative())
def __neg__(self):
return Byte(-self.toPrimative())
def __abs__(self):
return Byte(abs(self.toPrimative()))
def __invert__(self):
return Byte(~self.toPrimative())
def __floor__(self):
return Byte(np.floor(self.toPrimative()))
def __ceil__(self):
return Byte(np.ceil(self.toPrimative()))
###################################
## Arithmetic magic methods
###################################
def __add__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() + other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() + other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() + other.toPrimative())
else:
return Byte(self.toPrimative() + other)
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() - other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() - other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() - other.toPrimative())
else:
return Byte(self.toPrimative() - other)
def __rsub__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(other.toPrimative()-self.toPrimative()))
else:
return Integer(np.int32(other-self.toPrimative()))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(other.toPrimative()-self.toPrimative())
else:
return Byte(other-self.toPrimative())
def __isub__(self, other):
return self.__sub__(other)
def __mul__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() * other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() * other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() * other.toPrimative())
else:
return Byte(self.toPrimative() * other)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
return self.__mul__(other)
def __floordiv__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() // other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() // other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() // other.toPrimative())
else:
return Byte(self.toPrimative() // other)
def __rfloordiv__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(other.toPrimative() // self.toPrimative()))
else:
return Integer(np.int32(other // self.toPrimative()))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(other.toPrimative() // self.toPrimative())
else:
return Byte(other // self.toPrimative())
def __ifloordiv__(self, other):
return self.__floordiv__(other)
def __div__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() / other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() / other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() / other.toPrimative())
else:
return Byte(self.toPrimative() / other)
def __rdiv__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(other.toPrimative() / self.toPrimative()))
else:
return Integer(np.int32(other / self.toPrimative()))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(other.toPrimative() / self.toPrimative())
else:
return Byte(other/self.toPrimative())
def __idiv__(self, other):
return self.__div__(other)
def __mod__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() % other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() % other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() % other.toPrimative())
else:
return Byte(self.toPrimative() % other)
def __rmod__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(other.toPrimative() % self.toPrimative()))
else:
return Integer(np.int32(other % self.toPrimative()))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(other.toPrimative() % self.toPrimative())
else:
return Byte(other % self.toPrimative())
def __imod__(self, other):
return self.__mod__(other)
def __pow__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() ** other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() ** other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() ** other.toPrimative())
else:
return Byte(self.toPrimative() ** other)
def __rpow__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(other.toPrimative() ** self.toPrimative()))
else:
return Integer(np.int32(other ** self.toPrimative()))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(other.toPrimative() ** self.toPrimative())
else:
return Byte(other ** self.toPrimative())
def __ipow__(self, other):
return self.__pow__(other)
def __lshift__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() << other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() << other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() << other.toPrimative())
else:
return Byte(self.toPrimative() << other)
def __rlshift__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(other.toPrimative() << self.toPrimative()))
else:
return Integer(np.int32(other << self.toPrimative()))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(other.toPrimative() << self.toPrimative())
else:
return Byte(other << self.toPrimative())
def __ilshift__(self, other):
return self.__lshift__(other)
def __rshift__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() >> other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() >> other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() >> other.toPrimative())
else:
return Byte(self.toPrimative() >> other)
def __rrshift__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(other.toPrimative() >> self.toPrimative()))
else:
return Integer(np.int32(other >> self.toPrimative()))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(other.toPrimative() >> self.toPrimative())
else:
return Byte(other >> self.toPrimative())
def __irshift__(self, other):
return self.__rshift__(other)
def __and__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() & other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() & other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() & other.toPrimative())
else:
return Byte(self.toPrimative() & other)
def __rand__(self, other):
return self.__and__(other)
def __iand__(self, other):
return self.__and__(other)
def __or__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() | other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() | other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() | other.toPrimative())
else:
return Byte(self.toPrimative() | other)
def __ror__(self, other):
return self.__or__(other)
def __ior__(self, other):
return self.__or__(other)
def __xor__(self, other):
if isinstance(other, metaInteger):
if isinstance(other, Number):
return Integer(np.int32(self.toPrimative() ^ other.toPrimative()))
else:
return Integer(np.int32(self.toPrimative() ^ other))
elif isinstance(other, metaNumber):
if isinstance(other, Number):
return Byte(self.toPrimative() ^ other.toPrimative())
else:
return Byte(self.toPrimative() ^ other)
def __rxor__(self, other):
return self.__xor__(other)
def __ixor__(self, other):
return self.__xor__(other)
@DocInherit
def toPrimative(self):
return self.byteValue()
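# Usage sketch (illustrative; requires the TASSELpy JVM bridge to be running):
# b = Byte(np.int8(3))
# b + Byte(np.int8(4))      # -> Byte(7): Byte op Byte stays a Byte
# b + Integer(np.int32(4))  # -> Integer(7): mixing with Integer promotes to Integer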
|
|
#!/usr/bin/env python3
# from inspect import trace
from midi import MIDI
# import sys
# import re
import copy
import inspect
import records
from fractions import Fraction
import util
# event, ... , channel, params...
PITCH_EVENTS = ['note', 'key_after_touch']
PITCH_INDEX = {'note': 4, 'key_after_touch': 3}
DEBUG_ON = []
'Utility functions.'
def PRINT(*args, **kwargs):
if inspect.stack()[1][3] in DEBUG_ON:
print(*args, **kwargs)
def event_is_vol(event):
return event[0] == 'control_change' and event[3] == 0x07
def event_is_pan(event):
return event[0] == 'control_change' and event[3] == 0x0A
def get_channel(track):
channel = -1
for event in track:
command = event[0]
if command in util.MIDI.Event2channelindex:
channel = event[util.MIDI.Event2channelindex[command]]
break
return channel
def get_name(track):
# ['track_name', time, text]
name = 'Unnamed Track'
for event in track:
command = event[0]
if command == 'track_name':
name = event[2]
break
return name
# def filter_track(track, match_list, delete=False):
# """If delete is False, includes all matches.
# If delete is True, excludes all matches."""
#
# new_track = []
#
# for event in track:
# is_match = True
# for idx, match_param in enumerate(match_list):
# # None = skip the match.
# # All matching events are deleted.
# if match_param is not None and match_param != event[idx]:
# is_match = False
# break
# # If delete == True, then we delete all is_match == True.
# # If delete == False, we delete all is_match == False.
#
# # So we skip if the two are equal.
#
# if is_match == delete:
# continue
#
# new_track.append(event)
#
# return new_track
# def filter_get(track, get_list):
# # Essentially build a set of all matching dollar signs.
# # This can be used to get a list of all instruments, etc.
# get_index = get_list.index('$')
#
# filter_list = [x if x != '$' else None for x in get_list]
# events = filter_track(track, get_list, delete=False)
def get_instrs(track):
return {event[3] for event in track if event[0] == 'patch_change'}
def filter_instr(tracks, good_patch):
matches_idx = []
for idx, track in enumerate(tracks):
if idx == 0:
continue
track_instrs = get_instrs(track)
if len(track_instrs) > 1:
print('Track', idx, 'has', len(track_instrs), 'instruments!')
print('Continuing...')
if good_patch in track_instrs:
matches_idx.append(idx)
return matches_idx
'In-place functions'
def volume_mod(track, curve):
"""Modifies the volume CCs in-place.
Returns None.
"""
if curve is None:
print('Invalid curve!')
return
# Modify events by reference.
for event in track:
if event_is_vol(event):
# ['CC', time, channel, volume, value]
# value at idx 4
new_volume = curve(event[4])
event[4], error = util.clip_127(new_volume)
if error == 1:
print('Warning: Volume mod overflow!')
if error == -1:
print('Warning: Volume mod underflow')
def velocity_mod(track, curve):
"""Modifies the velocities in-place.
Returns None.
"""
if curve is None:
print('Invalid curve!')
return
for event in track:
if event[0] in PITCH_EVENTS:
chanidx = util.MIDI.Event2channelindex[event[0]]
# type, ... , channel, pitch, VELOCITY
# To modify velocity, query at chanidx + 2.
old_velocity = event[chanidx + 2]
new_velocity = curve(old_velocity)
new_velocity, error = util.clip_127(new_velocity)
if error == 1:
print('Warning: Velocity overflow!')
if error == -1:
print('Warning: Velocity underflow!')
event[chanidx + 2] = new_velocity
def remap(track, note_map):
"""Remaps a track in-place.
notemap must be a list of length 128.
Returns None.
"""
# type, ... , channel, PITCH, velocity
# To modify pitch, index with PITCH_INDEX[event[0]].
for event in track:
if event[0] in PITCH_EVENTS:
pitch_idx = PITCH_INDEX[event[0]]
event[pitch_idx] = note_map[event[pitch_idx]]
def remap_instr(track, instr_map):
# MODIFIES IN PLACE
for event in track:
if event[0] == 'patch_change':
# ['patch_change', time, channel, patch]
orig_patch = event[3]
if orig_patch in instr_map:
event[3] = instr_map[orig_patch]
else:
if 'warn' in instr_map:
bad_patch_name = util.num2instr(orig_patch)
print('Warning: Missing mapping for {}'.format(bad_patch_name))
def transpose(track, amount, message='DEBUG_MAYBE?'):
for event in track:
if event[0] in PITCH_EVENTS:
pitch_idx = PITCH_INDEX[event[0]]
new_pitch = event[pitch_idx] + amount
new_pitch, error = util.clip_127(new_pitch)
if error == 1:
print('Warning: transpose pitch overflow!')
print(message, event, 'transpose', amount)
if error == -1:
print('Warning: transpose pitch underflow!')
print(message, event, 'transpose', amount)
event[pitch_idx] = new_pitch
def loop_track(track, start_ticks, end_ticks):
"""Loops the global track. Returns None."""
# ['text_event', time, text]
# text = loopStart or loopEnd
start_event = ['marker', start_ticks, 'loopStart']
stop_event = ['marker', end_ticks, 'loopEnd']
# Add the events into the array
# Starting time = index 1
util.insert_keyed(track, start_event, key=lambda e: e[1])
util.insert_keyed_after(track, stop_event, key=lambda e: e[1])
def channel_mod(track, new_channel):
""" Changes the channel of a track. In place."""
for event in track:
if event[0] in util.MIDI.Event2channelindex:
channel_idx = util.MIDI.Event2channelindex[event[0]]
event[channel_idx] = new_channel
'Return functions'
def vol_combine(track, new_vol=127):
"""Combine volume for an entire track.
Configurable output volume now."""
channel = get_channel(track)
if channel == -1:
channel = 15
vol = 127
# Dummy channel volume event
# ['control_change', time, channel, controller(0-127), value(0-127)]
new_track = [['control_change', 0, channel, 0x07, new_vol]]
for event in track:
# Change volume = record new volume.
if event_is_vol(event):
vol = event[4]
# Note = combine volume and velocity.
elif event[0] == 'note':
# vol2 * vel2 = vol1 * vel1
# vel2 = vol1 * vel1 / vol2
out_vel = round(Fraction(event[5]) * Fraction(vol) / new_vol)
if out_vel > 127:
print('vol_combine clipped!')
out_vel = 127
out_event = event[:5] + [out_vel]
new_track.append(out_event)
# Pass through.
else:
new_track.append(event[:])
return new_track
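# Worked example for vol_combine() above: with channel volume 100 and note
# velocity 64, folding into new_vol=127 gives round(64 * 100 / 127) == 50
# as the output velocity.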
def trim_notes(track, length):
return [event[:2] + [length] + event[3:] if event[0] == 'note' and event[2] >= length
else event
for event in track]
def clone_rename(track, new_name):
"""Returns a new track. In order to preserve numbering, it should be appended to the end of "tracks"."""
# ['track_name', time, text]
new_track = [['track_name', 0, new_name]]
for event in track:
new_event = event[:]
if event[0] == 'track_name':
# Omit.
pass
else:
new_track.append(new_event)
return new_track
# Deletion
def del_cfg(track, cls):
out = []
for event in track:
# ['control_change', time, channel, controller(0-127), value(0-127)]
if event[0] == 'control_change':
# Exclude control-change other than volume and pan.
try:
cls(event)
except records.RecordException:
continue
else:
out.append(event)
continue
if event[0] in [
'sysex_f0', 'sysex_f7',
'synthesizer_specific',
'sequencer_specific']:
continue
else:
out.append(event)
return out
def del_stuff(track):
return del_cfg(track, records.Record)
def del_loose(track):
return del_cfg(track, records.RecordLoose)
'One -> many'
def split_track(track):
"""Splits a track by program changes.
Returns a list of new tracks.
You must manually fix the channels afterwards.
"""
used_insts = {event[3] for event in track if event[0] == 'patch_change'}
if len(used_insts) == 0:
used_insts.add(0)
idx2inst = list(used_insts)
inst2idx = {v: k for k, v in enumerate(idx2inst)}
# Placeholder set of out tracks. Each one has a single instrument.
out_tracks = []
for i in range(len(idx2inst)):
out_tracks.append([])
curr_track = 0
channel = get_channel(track)
if channel == -1:
channel = 15
# Add the program change events at the beginning.
for idx, inst in enumerate(idx2inst):
out_tracks[idx].append(['patch_change', 0, channel, inst])
# Add the events.
# Only note commands are filtered. All others are passed through to all tracks.
# Program change events change curr_track and are discarded.
# A subsequent pass through Anvil Studio's MIDI repair tool may be needed to fix this up.
for event in track:
event = event[:]
if event[0] in PITCH_EVENTS:
# PRINT(curr_track, event)
out_tracks[curr_track].append(event)
elif event[0] == 'patch_change':
# PRINT(event)
curr_track = inst2idx[event[3]]
elif event[0] == 'track_name':
# track_name is a meta-event and does not have a channel associated.
# type, time, NAME [2]
# Add the instrument name to each track's name.
for idx, out_track in enumerate(out_tracks):
ecopy = event[:]
ecopy[2] += ' ' + util.num2instr(idx2inst[idx])
out_track.append(ecopy)
else:
for out_track in out_tracks:
ecopy = event[:]
out_track.append(ecopy)
return out_tracks
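# Illustrative behaviour of split_track(): a track that switches patch 0 -> patch 24
# between runs of notes comes back as two tracks, one per instrument, each starting
# with its own patch_change at time 0; run channel_all() afterwards to fix channels.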
'Multi-track functions.'
def label_tracks(tracks):
for track_num, track in enumerate(tracks):
tracks[track_num] = clone_rename(track, 'Track {}'.format(track_num))
# def foreach_track(tracks, func):
# for idx, track in enumerate(tracks):
# if idx == 0:
# continue
# func(track)
# def foreach_set(tracks, func):
# for idx, track in enumerate(tracks):
# if idx == 0:
# continue
# tracks[idx] = func(track)
def split_all(tracks):
new_tracks = []
for idx, track in enumerate(tracks):
if idx == 0:
new_tracks.append(track)
continue
new_tracks.extend(split_track(track))
return new_tracks
def channel_all(tracks):
"""Fixes all channels to avoid overlapping. In-place."""
count = 0
is_wrap = False
# Chan 10->9 is excluded.
count2chan = list(range(9)) + list(range(10, 16))
for idx, track in enumerate(tracks):
track_chan = get_channel(track)
if track_chan != (10 - 1):
if track_chan == -1:
if idx != 0:
print('Warning! Track {} contains no channel events!'.format(idx))
continue
# Skip without consuming a channel slot: this track has zero events that need a channel.
if count == 0:
if is_wrap:
print('Warning! Too many tracks, wrapping to channel 0!')
is_wrap = True
track_chan = count2chan[count]
count = (count + 1) % len(count2chan)
for event in track:
event_type = event[0]
if event_type in util.MIDI.Event2channelindex:
chan_idx = util.MIDI.Event2channelindex[event_type]
event[chan_idx] = track_chan
if idx == 0:
print('Channel event in Track 0?', event)
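# Minimal sketch (assuming this module's own imports resolve): transpose every
# note up an octave with remap(), which only needs PITCH_INDEX from this file.
# demo = [['note', 0, 96, 0, 60, 100], ['note', 96, 96, 0, 64, 100]]
# remap(demo, [min(p + 12, 127) for p in range(128)])
# demo  # -> pitches 60/64 become 72/76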
|
|
import pytest
import mock
from marshmallow import ValidationError
from puzzle_engine.hitori.schemas import (
CellSchema,
BoardSchema,
HitoriSolutionSchema
)
class TestCellSchema:
@pytest.fixture
def data(self):
return {
'id': 1,
'row_number': 1,
'column_number': 2,
'value': 5
}
@pytest.fixture
def patched_cell(self):
patcher = mock.patch('puzzle_engine.hitori.schemas.Cell')
yield patcher.start()
patcher.stop()
def test_cell_schema_loads(self, data, patched_cell):
loaded_data = CellSchema(strict=True).load(data).data
assert loaded_data is patched_cell.return_value
patched_cell.assert_called_once_with(**data)
bad_data = [
{
'id': 1,
'row_number': 3,
'column_number': -1,
'value': 5
},
{
'id': 1,
'row_number': -3,
'column_number': 5,
'value': 5
},
{
'id': 1,
'row_number': -3,
'column_number': -5,
'value': 2
}
]
@pytest.mark.parametrize('data', bad_data)
def test_cell_schema_validates(self, data):
with pytest.raises(ValidationError):
CellSchema(strict=True).load(data)
class TestBoardSchema:
@pytest.fixture
def data(self):
return {
'id': 1,
'number_of_rows': 5,
'number_of_columns': 5,
'cells': [
{
'id': 1,
'row_number': 1,
'column_number': 2,
'value': 3
}
]
}
@pytest.fixture
def patched_board(self):
patcher = mock.patch('puzzle_engine.hitori.schemas.Board')
yield patcher.start()
patcher.stop()
def test_board_schema_loads(self, data, patched_board):
loaded_data = BoardSchema(strict=True).load(data).data
assert patched_board.return_value is loaded_data
call = patched_board.call_args[1]
assert call['number_of_rows'] == data['number_of_rows']
assert call['number_of_columns'] == data['number_of_columns']
assert call['cells']
assert len(call['cells']) == 1
cell = call['cells'][0]
assert cell.row_number == 1
assert cell.column_number == 2
assert cell.value == 3
bad_data = [
{
'id': 1,
'number_of_rows': -5,
'number_of_columns': 5,
'cells': [
{
'id': 1,
'row_number': 1,
'column_number': 2,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': 5,
'number_of_columns': -5,
'cells': [
{
'id': 1,
'row_number': 1,
'column_number': 2,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': -5,
'number_of_columns': -5,
'cells': [
{
'id': 1,
'row_number': 1,
'column_number': 2,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': 5,
'number_of_columns': 5,
'cells': [
{
'id': 1,
'row_number': 10,
'column_number': 2,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': 5,
'number_of_columns': 5,
'cells': [
{
'id': 2,
'row_number': 3,
'column_number': 12,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': 5,
'number_of_columns': 5,
'cells': [
{
'id': 2,
'row_number': 10,
'column_number': 12,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': 5,
'number_of_columns': 5,
'cells': [
{
'id': 3,
'row_number': 1,
'column_number': 6,
'value': 3
},
{
'id': 5,
'row_number': 3,
'column_number': 2,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': 5,
'number_of_columns': 5,
'cells': [
{
'id': 1,
'row_number': 5,
'column_number': 3,
'value': 3
}
]
},
{
'id': 1,
'number_of_rows': 5,
'number_of_columns': 5,
'cells': [
{
'id': 2,
'row_number': 3,
'column_number': 5,
'value': 3
}
]
},
]
@pytest.mark.parametrize('data', bad_data)
def test_board_schema_validates(self, data):
with pytest.raises(ValidationError):
BoardSchema(strict=True).load(data)
class TestHitoriSolutionSchema:
@pytest.fixture
def cells_on(self):
return [
{
'id': 3,
'row_number': 1,
'column_number': 2,
'value': 3
},
{
'id': 5,
'row_number': 3,
'column_number': 2,
'value': 3
}
]
@pytest.fixture
def cells_off(self):
return [
{
'id': 3,
'row_number': 1,
'column_number': 6,
'value': 3
}
]
@pytest.fixture
def board(self):
return {'id': 2, 'cells': []}
@pytest.fixture
def hitori_solution(self, cells_on, cells_off, board):
return {
'cells_on': cells_on,
'cells_off': cells_off,
'board': board
}
@pytest.fixture
def expected_dumped_hitori_solution(self, cells_on, cells_off, board):
return {
'cells_on': [cell['id'] for cell in cells_on],
'cells_off': [cell['id'] for cell in cells_off],
'board': board['id']
}
def test_hitori_solution_schema_dump(self, hitori_solution, expected_dumped_hitori_solution):
data = HitoriSolutionSchema(strict=True).dump(hitori_solution).data
assert data == expected_dumped_hitori_solution
|
|
###################################################
# header_common.py
# This file contains common declarations.
# DO NOT EDIT THIS FILE!
###################################################
server_event_preset_message = 0
server_event_play_sound = 1
server_event_scene_prop_play_sound = 2
server_event_play_sound_at_position = 3
server_event_agent_equip_armor = 4
server_event_player_set_slot = 5
server_event_scene_prop_set_slot = 6
server_event_faction_set_slot = 7
server_event_troop_set_slot = 8
server_event_agent_set_slot = 9
server_event_show_inventory = 10
server_event_chat_message_recieved = 11
server_event_local_chat = 12
server_event_local_chat_shout = 13
server_event_faction_set_name = 14
server_event_return_game_rules = 15
server_event_return_server_name = 16
server_event_return_password = 17
server_event_set_player_score_kill_death = 18
server_event_show_poll = 19
server_event_set_overflow_gold = 20
server_event_faction_chat = 21
server_event_faction_chat_announce = 22
server_event_admin_chat = 23
server_event_admin_chat_announce = 24
server_event_admin_set_permissions = 25
server_event_set_attached_scene_prop = 26
server_event_local_animation = 27
client_event_request_animation = 95
client_event_reveal_money_pouch = 96
client_event_agent_loot_armor = 97
client_event_toggle_drop_armor = 98
client_event_admin_equip_item = 99
client_event_poll_vote = 100
client_event_request_poll = 101
client_event_request_spawn_point = 102
client_event_request_game_rules = 103
client_event_admin_set_server_name = 104
client_event_admin_set_password = 105
client_event_admin_set_welcome_message = 106
client_event_admin_set_game_rule = 107
client_event_admin_action = 108
client_event_faction_admin_action = 109
client_event_chat_message_begin = 110
client_event_chat_message_end = 120
client_event_chat_message_type = 120
client_event_transfer_gold = 121
client_event_request_stock_count = 122
client_event_drop_money_bag = 123
client_event_change_faction_banner = 124
client_event_transfer_inventory = 125
client_event_control_scene_prop = 126
client_event_attach_scene_prop = 127
# Network events are limited to numbers between 0 and 127 by the game engine.
preset_message_default = 0x0
preset_message_item = 0x2 # converts value 1 from item id into name string
preset_message_agent = 0x3 # converts value 1 from agent id into name string
preset_message_player = 0x4 # converts value 1 from player id into username string
preset_message_faction = 0x5 # converts value 1 from faction id into name string
preset_message_faction_castle = 0x6 # converts value 1 from castle id into name string
preset_message_params_mask = 0xF
preset_message_white = 0x00
preset_message_red = 0x10
preset_message_green = 0x20
preset_message_blue = 0x30
preset_message_yellow = 0x40
preset_message_color_mask = 0xF0
preset_message_small = 0x000
preset_message_big = 0x100
preset_message_read_object = 0x200 # displays the presentation for reading a book
preset_message_chat_only = 0x300 # only displays in the chat log
preset_message_type_mask = 0xF00
preset_message_log = 0x1000 # add message to the chat log
preset_message_fail_sound = 0x2000 # play a failure sound
preset_message_error = preset_message_red|preset_message_fail_sound
preset_message_info = preset_message_yellow
preset_message_chat_log = preset_message_chat_only|preset_message_log
# Module system commands
command_get_bot_count = 1
command_set_bot_count = 2
command_get_round_max_seconds = 3
command_set_round_max_seconds = 4
command_get_respawn_period = 5
command_set_respawn_period = 6
command_get_num_bots_voteable = 7
command_set_num_bots_voteable = 8
command_get_maps_voteable = 9
command_set_maps_voteable = 10
command_get_factions_voteable = 11
command_set_factions_voteable = 12
command_get_player_respawn_as_bot = 13
command_set_player_respawn_as_bot = 14
command_get_kick_voteable = 15
command_set_kick_voteable = 16
command_get_ban_voteable = 17
command_set_ban_voteable = 18
command_get_valid_vote_ratio = 19
command_set_valid_vote_ratio = 20
command_get_auto_team_balance_limit = 21
command_set_auto_team_balance_limit = 22
command_get_starting_gold = 23
command_set_starting_gold = 24
command_get_combat_gold_bonus = 25
command_set_combat_gold_bonus = 26
command_get_round_gold_bonus = 27
command_set_round_gold_bonus = 28
command_get_player_banners_allowed = 29
command_set_player_banners_allowed = 30
command_get_force_default_armor = 31
command_set_force_default_armor = 32
command_get_team_points_gained_for_flags = 33
command_set_team_points_gained_for_flags = 34
command_get_points_gained_for_capturing_flags = 35
command_set_points_gained_for_capturing_flags = 36
command_get_map_time_limit = 37
command_set_map_time_limit = 38
command_get_team_point_limit = 39
command_set_team_point_limit = 40
command_get_defender_spawn_count = 41
command_set_defender_spawn_count = 42
command_get_disallow_ranged_weapons = 43
command_set_disallow_ranged_weapons = 44
# Napoleonic Wars commands
command_use_class_limits = 50
command_class_limit_player_count = 51
command_squad_size = 52
command_scale_squad = 53
command_build_points_team1 = 54
command_build_points_team2 = 55
command_allow_multiple_firearms = 56
command_enable_bonuses = 57
command_bonus_strength = 58
command_bonus_range = 59
command_fall_off_horse = 60
command_horse_dying = 61
command_auto_kick = 62
command_max_teamkills_before_kick = 63
command_auto_horse = 64
command_auto_swap = 65
command_limit_grenadier = 66
command_limit_skirmisher = 67
command_limit_rifle = 68
command_limit_cavalry = 69
command_limit_lancer = 70
command_limit_hussar = 71
command_limit_dragoon = 72
command_limit_cuirassier = 73
command_limit_heavycav = 74
command_limit_artillery = 75
command_limit_rocket = 76
command_limit_sapper = 77
command_limit_musician = 78
command_limit_sergeant = 79
command_limit_officer = 80
command_limit_general = 81
# Hard coded commands
command_get_max_players = 101
command_set_max_players = 102
command_get_friendly_fire = 103
command_set_friendly_fire = 104
command_get_melee_friendly_fire = 105
command_set_melee_friendly_fire = 106
command_get_friendly_fire_damage_self_ratio = 107
command_set_friendly_fire_damage_self_ratio = 108
command_get_friendly_fire_damage_friend_ratio = 109
command_set_friendly_fire_damage_friend_ratio = 110
command_get_ghost_mode = 111
command_set_ghost_mode = 112
command_get_control_block_direction = 113
command_set_control_block_direction = 114
command_get_combat_speed = 115
command_set_combat_speed = 116
command_get_add_to_game_servers_list = 117
command_set_add_to_game_servers_list = 118
command_get_anti_cheat = 119
command_set_anti_cheat = 120
command_get_renaming_server_allowed = 121
command_set_renaming_server_allowed = 122
command_get_changing_game_type_allowed = 123
command_set_changing_game_type_allowed = 124
command_start_scene = 130
command_open_admin_panel = 132
command_open_game_rules = 134
command_set_server_mission_timer = 136
commands_module_system_begin = command_get_bot_count
commands_module_system_end = command_set_disallow_ranged_weapons + 1
commands_napoleonic_wars_begin = command_use_class_limits
commands_napoleonic_wars_end = command_limit_general + 1
commands_hard_coded_begin = command_get_max_players
commands_hard_coded_end = command_set_anti_cheat + 1
min_num_players = 2
max_num_players = 250 # limited by the game engine
min_respawn_period = 3
max_respawn_period = 31 # dead agents are removed after approximately this interval
team_default = 0 # default team, members can attack each other like deathmatch - since multiplayer is hard coded to handle only 2 teams
team_spawn_invulnerable = 1 # team set to be neutral to each other and the default team, so they can't attack or be attacked
team_spectators = 2 # hard coded spectators team
net_value_upper_bound = 1 << 31
net_sound_shift = 16
net_sound_mask = (1 << net_sound_shift) - 1
net_pack_3_shift_2 = 10
net_pack_3_shift_3 = 20
net_pack_3_value_upper_bound = 1 << net_pack_3_shift_2
net_pack_3_mask_1 = net_pack_3_value_upper_bound - 1
net_pack_3_mask_2 = net_pack_3_mask_1 << net_pack_3_shift_2
net_pack_3_mask_3 = net_pack_3_mask_1 << net_pack_3_shift_3
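# Illustrative example (not part of this header): the pack-3 constants combine three
# values of up to net_pack_3_shift_2 (10) bits each into a single network integer:
#   packed = a | (b << net_pack_3_shift_2) | (c << net_pack_3_shift_3)
#   a = packed & net_pack_3_mask_1
#   b = (packed & net_pack_3_mask_2) >> net_pack_3_shift_2
#   c = (packed & net_pack_3_mask_3) >> net_pack_3_shift_3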
net_chat_type_shift = 8
net_chat_param_1_shift = net_chat_type_shift * 2
net_chat_event_mask = (1 << net_chat_type_shift) - 1
stats_chart_score_shift = 8
stats_chart_ranking_shift = 24
stats_chart_score_max = 1 << (stats_chart_ranking_shift - stats_chart_score_shift)
stats_chart_player_mask = (1 << stats_chart_score_shift) - 1
admin_action_kick_player = 0
admin_action_ban_player_temp = 1
admin_action_ban_player_perm = 2
admin_action_mute_player = 3
admin_action_kill_player = 4
admin_action_fade_player_out = 5
admin_action_freeze_player = 6
admin_action_teleport_to_player = 7
admin_action_teleport_behind_player = 8
admin_action_teleport_forwards = 9
admin_action_get_armor = 10
admin_action_get_invisible = 11
admin_action_refill_health = 12
admin_action_become_godlike = 13
admin_action_get_horse = 14
admin_action_remove_horses = 15
admin_action_remove_stray_horses = 16
admin_action_teleport_to_ships = 17
admin_action_reset_ships = 18
admin_action_lock_faction = 19
faction_admin_action_change_banner = 0
faction_admin_action_kick_player = 1
faction_admin_action_toggle_player_door_key = 2
faction_admin_action_toggle_player_money_key = 3
faction_admin_action_toggle_player_item_key = 4
faction_admin_action_set_relation_hostile = 5
faction_admin_action_set_relation_peaceful = 6
faction_admin_action_outlaw_player = 7
faction_admin_action_mute_player = 8
faction_admin_action_toggle_player_announce = 9
max_possible_gold = 1000000000
max_correctly_displayed_gold = 131071 # player gold over this value will not be updated correctly by the game engine
max_correctly_displayed_hp = 15000 # scene prop hit points over approximately this value will not be displayed correctly in the engine hit points bar
min_scene_prop_hit_points = 1
profile_banner_id_option_bits_begin = 9
profile_banner_id_option_bits_end = 30
profile_banner_id_mask = (1 << profile_banner_id_option_bits_begin) - 1
bignum = 0x40000000000000000000000000000000
op_num_value_bits = 24 + 32
tag_register = 1
tag_variable = 2
tag_string = 3
tag_item = 4
tag_troop = 5
tag_faction = 6
tag_quest = 7
tag_party_tpl = 8
tag_party = 9
tag_scene = 10
tag_mission_tpl = 11
tag_menu = 12
tag_script = 13
tag_particle_sys = 14
tag_scene_prop = 15
tag_sound = 16
tag_local_variable = 17
tag_map_icon = 18
tag_skill = 19
tag_mesh = 20
tag_presentation = 21
tag_quick_string = 22
tag_track = 23
tag_tableau = 24
tag_animation = 25
tags_end = 26
opmask_register = tag_register << op_num_value_bits
opmask_variable = tag_variable << op_num_value_bits
opmask_string = tag_string << op_num_value_bits
opmask_item_index = tag_item << op_num_value_bits
opmask_troop_index = tag_troop << op_num_value_bits
opmask_faction_index = tag_faction << op_num_value_bits
opmask_quest_index = tag_quest << op_num_value_bits
opmask_p_template_index = tag_party_tpl << op_num_value_bits
opmask_party_index = tag_party << op_num_value_bits
opmask_scene_index = tag_scene << op_num_value_bits
opmask_mission_tpl_index = tag_mission_tpl << op_num_value_bits
opmask_menu_index = tag_menu << op_num_value_bits
opmask_script = tag_script << op_num_value_bits
opmask_particle_sys = tag_particle_sys << op_num_value_bits
opmask_scene_prop = tag_scene_prop << op_num_value_bits
opmask_sound = tag_sound << op_num_value_bits
opmask_map_icon = tag_map_icon << op_num_value_bits
opmask_local_variable = tag_local_variable << op_num_value_bits
opmask_quick_string = tag_quick_string << op_num_value_bits
def reg(reg_no):
if not 0 < reg_no < 128:
raise Exception("ERROR: invalid register number.")
return opmask_register | reg_no
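# e.g. reg(5) == opmask_register | 5 == reg5; reg_no must be in 1..127,
# so reg(0) and reg(128) raise.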
s0 = 0
s1 = 1
s2 = 2
s3 = 3
s4 = 4
s5 = 5
s6 = 6
s7 = 7
s8 = 8
s9 = 9
s10 = 10
s11 = 11
s12 = 12
s13 = 13
s14 = 14
s15 = 15
s16 = 16
s17 = 17
s18 = 18
s19 = 19
s20 = 20
s21 = 21
s22 = 22
s23 = 23
s24 = 24
s25 = 25
s26 = 26
s27 = 27
s28 = 28
s29 = 29
s30 = 30
s31 = 31
s32 = 32
s33 = 33
s34 = 34
s35 = 35
s36 = 36
s37 = 37
s38 = 38
s39 = 39
s40 = 40
s41 = 41
s42 = 42
s43 = 43
s44 = 44
s45 = 45
s46 = 46
s47 = 47
s48 = 48
s49 = 49
s50 = 50
s51 = 51
s52 = 52
s53 = 53
s54 = 54
s55 = 55
s56 = 56
s57 = 57
s58 = 58
s59 = 59
s60 = 60
s61 = 61
s62 = 62
s63 = 63
s64 = 64
s65 = 65
s66 = 66
s67 = 67
s68 = 68
s69 = 69
s70 = 70
s71 = 71
s72 = 72
s73 = 73
s74 = 74
s75 = 75
s76 = 76
s77 = 77
s78 = 78
s79 = 79
s80 = 80
s81 = 81
s82 = 82
s83 = 83
s84 = 84
s85 = 85
s86 = 86
s87 = 87
s88 = 88
s89 = 89
s90 = 90
s91 = 91
s92 = 92
s93 = 93
s94 = 94
s95 = 95
s96 = 96
s97 = 97
s98 = 98
s99 = 99
s100 = 100
s101 = 101
s102 = 102
s103 = 103
s104 = 104
s105 = 105
s106 = 106
s107 = 107
s108 = 108
s109 = 109
s110 = 110
s111 = 111
s112 = 112
s113 = 113
s114 = 114
s115 = 115
s116 = 116
s117 = 117
s118 = 118
s119 = 119
s120 = 120
s121 = 121
s122 = 122
s123 = 123
s124 = 124
s125 = 125
s126 = 126
s127 = 127
pos0 = 0
pos1 = 1
pos2 = 2
pos3 = 3
pos4 = 4
pos5 = 5
pos6 = 6
pos7 = 7
pos8 = 8
pos9 = 9
pos10 = 10
pos11 = 11
pos12 = 12
pos13 = 13
pos14 = 14
pos15 = 15
pos16 = 16
pos17 = 17
pos18 = 18
pos19 = 19
pos20 = 20
pos21 = 21
pos22 = 22
pos23 = 23
pos24 = 24
pos25 = 25
pos26 = 26
pos27 = 27
pos28 = 28
pos29 = 29
pos30 = 30
pos31 = 31
pos32 = 32
pos33 = 33
pos34 = 34
pos35 = 35
pos36 = 36
pos37 = 37
pos38 = 38
pos39 = 39
pos40 = 40
pos41 = 41
pos42 = 42
pos43 = 43
pos44 = 44
pos45 = 45
pos46 = 46
pos47 = 47
pos48 = 48
pos49 = 49
pos50 = 50
pos51 = 51
pos52 = 52
pos53 = 53
pos54 = 54
pos55 = 55
pos56 = 56
pos57 = 57
pos58 = 58
pos59 = 59
pos60 = 60
pos61 = 61
pos62 = 62
pos63 = 63
pos64 = 64
pos65 = 65
pos66 = 66
pos67 = 67
pos68 = 68
pos69 = 69
pos70 = 70
pos71 = 71
pos72 = 72
pos73 = 73
pos74 = 74
pos75 = 75
pos76 = 76
pos77 = 77
pos78 = 78
pos79 = 79
pos80 = 80
pos81 = 81
pos82 = 82
pos83 = 83
pos84 = 84
pos85 = 85
pos86 = 86
pos87 = 87
pos88 = 88
pos89 = 89
pos90 = 90
pos91 = 91
pos92 = 92
pos93 = 93
pos94 = 94
pos95 = 95
pos96 = 96
pos97 = 97
pos98 = 98
pos99 = 99
pos100 = 100
pos101 = 101
pos102 = 102
pos103 = 103
pos104 = 104
pos105 = 105
pos106 = 106
pos107 = 107
pos108 = 108
pos109 = 109
pos110 = 110
pos111 = 111
pos112 = 112
pos113 = 113
pos114 = 114
pos115 = 115
pos116 = 116
pos117 = 117
pos118 = 118
pos119 = 119
pos120 = 120
pos121 = 121
pos122 = 122
pos123 = 123
pos124 = 124
pos125 = 125
pos126 = 126
pos127 = 127
reg0 = opmask_register| 0
reg1 = opmask_register| 1
reg2 = opmask_register| 2
reg3 = opmask_register| 3
reg4 = opmask_register| 4
reg5 = opmask_register| 5
reg6 = opmask_register| 6
reg7 = opmask_register| 7
reg8 = opmask_register| 8
reg9 = opmask_register| 9
reg10 = opmask_register|10
reg11 = opmask_register|11
reg12 = opmask_register|12
reg13 = opmask_register|13
reg14 = opmask_register|14
reg15 = opmask_register|15
reg16 = opmask_register|16
reg17 = opmask_register|17
reg18 = opmask_register|18
reg19 = opmask_register|19
reg20 = opmask_register|20
reg21 = opmask_register|21
reg22 = opmask_register|22
reg23 = opmask_register|23
reg24 = opmask_register|24
reg25 = opmask_register|25
reg26 = opmask_register|26
reg27 = opmask_register|27
reg28 = opmask_register|28
reg29 = opmask_register|29
reg30 = opmask_register|30
reg31 = opmask_register|31
reg32 = opmask_register|32
reg33 = opmask_register|33
reg34 = opmask_register|34
reg35 = opmask_register|35
reg36 = opmask_register|36
reg37 = opmask_register|37
reg38 = opmask_register|38
reg39 = opmask_register|39
reg40 = opmask_register|40
reg41 = opmask_register|41
reg42 = opmask_register|42
reg43 = opmask_register|43
reg44 = opmask_register|44
reg45 = opmask_register|45
reg46 = opmask_register|46
reg47 = opmask_register|47
reg48 = opmask_register|48
reg49 = opmask_register|49
reg50 = opmask_register|50
reg51 = opmask_register|51
reg52 = opmask_register|52
reg53 = opmask_register|53
reg54 = opmask_register|54
reg55 = opmask_register|55
reg56 = opmask_register|56
reg57 = opmask_register|57
reg58 = opmask_register|58
reg59 = opmask_register|59
reg60 = opmask_register|60
reg61 = opmask_register|61
reg62 = opmask_register|62
reg63 = opmask_register|63
reg64 = opmask_register|64
reg65 = opmask_register|65
reg66 = opmask_register|66
reg67 = opmask_register|67
reg68 = opmask_register|68
reg69 = opmask_register|69
reg70 = opmask_register|70
reg71 = opmask_register|71
reg72 = opmask_register|72
reg73 = opmask_register|73
reg74 = opmask_register|74
reg75 = opmask_register|75
reg76 = opmask_register|76
reg77 = opmask_register|77
reg78 = opmask_register|78
reg79 = opmask_register|79
reg80 = opmask_register|80
reg81 = opmask_register|81
reg82 = opmask_register|82
reg83 = opmask_register|83
reg84 = opmask_register|84
reg85 = opmask_register|85
reg86 = opmask_register|86
reg87 = opmask_register|87
reg88 = opmask_register|88
reg89 = opmask_register|89
reg90 = opmask_register|90
reg91 = opmask_register|91
reg92 = opmask_register|92
reg93 = opmask_register|93
reg94 = opmask_register|94
reg95 = opmask_register|95
reg96 = opmask_register|96
reg97 = opmask_register|97
reg98 = opmask_register|98
reg99 = opmask_register|99
reg100 = opmask_register|100
reg101 = opmask_register|101
reg102 = opmask_register|102
reg103 = opmask_register|103
reg104 = opmask_register|104
reg105 = opmask_register|105
reg106 = opmask_register|106
reg107 = opmask_register|107
reg108 = opmask_register|108
reg109 = opmask_register|109
reg110 = opmask_register|110
reg111 = opmask_register|111
reg112 = opmask_register|112
reg113 = opmask_register|113
reg114 = opmask_register|114
reg115 = opmask_register|115
reg116 = opmask_register|116
reg117 = opmask_register|117
reg118 = opmask_register|118
reg119 = opmask_register|119
reg120 = opmask_register|120
reg121 = opmask_register|121
reg122 = opmask_register|122
reg123 = opmask_register|123
reg124 = opmask_register|124
reg125 = opmask_register|125
reg126 = opmask_register|126
reg127 = opmask_register|127
spf_all_teams_are_enemy = 0x00000001
spf_is_horseman = 0x00000002
spf_examine_all_spawn_points = 0x00000004
spf_team_0_spawn_far_from_entry_32 = 0x00000008
spf_team_1_spawn_far_from_entry_0 = 0x00000010
spf_team_1_spawn_far_from_entry_66 = 0x00000020
spf_team_0_spawn_near_entry_0 = 0x00000040
spf_team_0_spawn_near_entry_66 = 0x00000080
spf_team_1_spawn_near_entry_32 = 0x00000100
spf_team_0_walkers_spawn_at_high_points = 0x00000200
spf_team_1_walkers_spawn_at_high_points = 0x00000400
spf_try_to_spawn_close_to_at_least_one_enemy = 0x00000800
spf_care_agent_to_agent_distances_less = 0x00001000
|
|
#!/usr/bin/env python3
#
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convert a JSON descrption of a spec test into a JavaScript."""
import argparse
import io
import json
import os
import re
import struct
import sys
import find_exe
from utils import ChangeDir, ChangeExt, Error, Executable
import utils
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
F32_INF = 0x7f800000
F32_NEG_INF = 0xff800000
F32_NEG_ZERO = 0x80000000
F32_SIGN_BIT = F32_NEG_ZERO
F32_SIG_MASK = 0x7fffff
F32_QUIET_NAN = 0x7fc00000
F32_QUIET_NAN_TAG = 0x400000
F64_INF = 0x7ff0000000000000
F64_NEG_INF = 0xfff0000000000000
F64_NEG_ZERO = 0x8000000000000000
F64_SIGN_BIT = F64_NEG_ZERO
F64_SIG_MASK = 0xfffffffffffff
F64_QUIET_NAN = 0x7ff8000000000000
F64_QUIET_NAN_TAG = 0x8000000000000
def I32ToJS(value):
# JavaScript will return all i32 values as signed.
if value >= 2**31:
value -= 2**32
return str(value)
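# e.g. I32ToJS(0xffffffff) -> '-1', since JS reads i32 results as signed.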
def IsNaNF32(f32_bits):
return (F32_INF < f32_bits < F32_NEG_ZERO) or (f32_bits > F32_NEG_INF)
def ReinterpretF32(f32_bits):
return struct.unpack('<f', struct.pack('<I', f32_bits))[0]
def NaNF32ToString(f32_bits):
result = '-' if f32_bits & F32_SIGN_BIT else ''
result += 'nan'
sig = f32_bits & F32_SIG_MASK
if sig != F32_QUIET_NAN_TAG:
result += ':0x%x' % sig
return result
def F32ToWasm(f32_bits):
if IsNaNF32(f32_bits):
return 'f32.const %s' % NaNF32ToString(f32_bits)
elif f32_bits == F32_INF:
return 'f32.const infinity'
elif f32_bits == F32_NEG_INF:
return 'f32.const -infinity'
else:
return 'f32.const %s' % float.hex(ReinterpretF32(f32_bits))
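# e.g. F32ToWasm(0x7fc00000) -> 'f32.const nan' (quiet NaN),
#      F32ToWasm(0x3f800000) -> 'f32.const 0x1.0000000000000p+0' (1.0).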
def F32ToJS(f32_bits):
assert not IsNaNF32(f32_bits)
if f32_bits == F32_INF:
return 'Infinity'
elif f32_bits == F32_NEG_INF:
return '-Infinity'
else:
return 'f32(%s)' % ReinterpretF32(f32_bits)
def IsNaNF64(f64_bits):
return (F64_INF < f64_bits < F64_NEG_ZERO) or (f64_bits > F64_NEG_INF)
def ReinterpretF64(f64_bits):
return struct.unpack('<d', struct.pack('<Q', f64_bits))[0]
def NaNF64ToString(f64_bits):
result = '-' if f64_bits & F64_SIGN_BIT else ''
result += 'nan'
sig = f64_bits & F64_SIG_MASK
if sig != F64_QUIET_NAN_TAG:
result += ':0x%x' % sig
return result
def F64ToWasm(f64_bits):
if IsNaNF64(f64_bits):
return 'f64.const %s' % NaNF64ToString(f64_bits)
elif f64_bits == F64_INF:
return 'f64.const infinity'
elif f64_bits == F64_NEG_INF:
return 'f64.const -infinity'
else:
return 'f64.const %s' % float.hex(ReinterpretF64(f64_bits))
def F64ToJS(f64_bits):
assert not IsNaNF64(f64_bits)
if f64_bits == F64_INF:
return 'Infinity'
elif f64_bits == F64_NEG_INF:
return '-Infinity'
else:
# Use repr to get full precision
return repr(ReinterpretF64(f64_bits))
def UnescapeWasmString(s):
# Wat allows for more escape characters than this, but we assume that
# wasm2wat will only use the \xx escapes.
result = ''
i = 0
while i < len(s):
c = s[i]
if c == '\\':
x = s[i + 1:i + 3]
if len(x) != 2:
raise Error('String with invalid escape: \"%s\"' % s)
result += chr(int(x, 16))
i += 3
else:
result += c
i += 1
return result
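# e.g. UnescapeWasmString('\\64\\6f\\67') -> 'dog'.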
def EscapeJSString(s):
result = ''
for c in s:
if 32 <= ord(c) < 127 and c not in '"\\':
result += c
else:
result += '\\x%02x' % ord(c)
return result
def IsValidJSConstant(const):
type_ = const['type']
value = const['value']
if type_ in ('f32', 'f64') and value in ('nan:canonical', 'nan:arithmetic'):
return True
if type_ == 'i32':
return True
elif type_ == 'i64':
return False
elif type_ == 'f32':
return not IsNaNF32(int(value))
elif type_ == 'f64':
return not IsNaNF64(int(value))
def IsValidJSAction(action):
return all(IsValidJSConstant(x) for x in action.get('args', []))
def IsValidJSCommand(command):
type_ = command['type']
action = command['action']
if type_ == 'assert_return':
expected = command['expected']
return (IsValidJSAction(action) and
all(IsValidJSConstant(x) for x in expected))
elif type_ in ('assert_trap', 'assert_exhaustion'):
return IsValidJSAction(action)
def CollectInvalidModuleCommands(commands):
modules = []
module_map = {}
for command in commands:
if command['type'] == 'module':
pair = (command, [])
modules.append(pair)
module_name = command.get('name')
if module_name:
module_map[module_name] = pair
elif command['type'] in ('assert_return', 'assert_trap',
'assert_exhaustion'):
if IsValidJSCommand(command):
continue
action = command['action']
module_name = action.get('module')
if module_name:
pair = module_map[module_name]
else:
pair = modules[-1]
pair[1].append(command)
return modules
class ModuleExtender(object):
def __init__(self, wat2wasm, wasm2wat, temp_dir):
self.wat2wasm = wat2wasm
self.wasm2wat = wasm2wat
self.temp_dir = temp_dir
self.lines = []
self.exports = {}
def Extend(self, wasm_path, commands):
wat_path = self._RunWasm2Wat(wasm_path)
with open(wat_path) as wat_file:
wat = wat_file.read()
self.lines = []
self.exports = self._GetExports(wat)
for i, command in enumerate(commands):
self._Command(i, command)
wat = wat[:wat.rindex(')')] + '\n\n'
wat += '\n'.join(self.lines) + ')'
# print wat
with open(wat_path, 'w') as wat_file:
wat_file.write(wat)
return self._RunWat2Wasm(wat_path)
def _Command(self, index, command):
command_type = command['type']
new_field = 'assert_%d' % index
if command_type == 'assert_return':
self.lines.append('(func (export "%s")' % new_field)
self.lines.append('block')
self._Action(command['action'])
for expected in command['expected']:
self._Reinterpret(expected['type'])
if expected['value'] in ('nan:canonical', 'nan:arithmetic'):
self._NanBitmask(expected['value'] == 'nan:canonical', expected['type'])
self._And(expected['type'])
self._QuietNan(expected['type'])
else:
self._Constant(expected)
self._Reinterpret(expected['type'])
self._Eq(expected['type'])
self.lines.extend(['i32.eqz', 'br_if 0'])
self.lines.extend(['return', 'end', 'unreachable', ')'])
elif command_type in ('assert_trap', 'assert_exhaustion'):
self.lines.append('(func (export "%s")' % new_field)
self._Action(command['action'])
self.lines.extend(['br 0', ')'])
else:
raise Error('Unexpected command: %s' % command_type)
# Update command to point to the new exported function.
command['action']['field'] = new_field
command['action']['args'] = []
command['expected'] = []
def _GetExports(self, wat):
result = {}
pattern = r'^\s*\(export \"(.*?)\"\s*\((\w+) (\d+)'
for name, type_, index in re.findall(pattern, wat, re.MULTILINE):
result[UnescapeWasmString(name)] = (type_, index)
return result
def _Action(self, action):
export = self.exports[action['field']]
if action['type'] == 'invoke':
for arg in action['args']:
self._Constant(arg)
self.lines.append('call %s' % export[1])
elif action['type'] == 'get':
self.lines.append('global.get %s' % export[1])
else:
raise Error('Unexpected action: %s' % action['type'])
def _Reinterpret(self, type_):
self.lines.extend({
'i32': [],
'i64': [],
'f32': ['i32.reinterpret_f32'],
'f64': ['i64.reinterpret_f64']
}[type_])
def _Eq(self, type_):
self.lines.append({
'i32': 'i32.eq',
'i64': 'i64.eq',
'f32': 'i32.eq',
'f64': 'i64.eq'
}[type_])
def _And(self, type_):
self.lines.append({
'i32': 'i32.and',
'i64': 'i64.and',
'f32': 'i32.and',
'f64': 'i64.and'
}[type_])
def _NanBitmask(self, canonical, type_):
# When checking for canonical NaNs, the value can differ only in the sign
# bit from +nan. For arithmetic NaNs, the sign bit and the rest of the tag
# can differ as well.
assert(type_ in ('f32', 'f64'))
if not canonical:
return self._QuietNan(type_)
if type_ == 'f32':
line = 'i32.const 0x7fffffff'
else:
line = 'i64.const 0x7fffffffffffffff'
self.lines.append(line)
def _QuietNan(self, type_):
assert(type_ in ('f32', 'f64'))
if type_ == 'f32':
line = 'i32.const 0x%x' % F32_QUIET_NAN
else:
line = 'i64.const 0x%x' % F64_QUIET_NAN
self.lines.append(line)
def _Constant(self, const):
inst = None
type_ = const['type']
value = const['value']
assert value not in ('nan:canonical', 'nan:arithmetic')
if type_ == 'i32':
inst = 'i32.const %s' % value
elif type_ == 'i64':
inst = 'i64.const %s' % value
elif type_ == 'f32':
inst = F32ToWasm(int(value))
elif type_ == 'f64':
inst = F64ToWasm(int(value))
self.lines.append(inst)
def _RunWasm2Wat(self, wasm_path):
wat_path = ChangeDir(ChangeExt(wasm_path, '.wat'), self.temp_dir)
self.wasm2wat.RunWithArgs(wasm_path, '-o', wat_path)
return wat_path
def _RunWat2Wasm(self, wat_path):
wasm_path = ChangeDir(ChangeExt(wat_path, '.wasm'), self.temp_dir)
self.wat2wasm.RunWithArgs(wat_path, '-o', wasm_path)
return wasm_path
class JSWriter(object):
def __init__(self, base_dir, spec_json, out_file):
self.base_dir = base_dir
self.source_filename = os.path.basename(spec_json['source_filename'])
self.commands = spec_json['commands']
self.out_file = out_file
self.module_idx = 0
def Write(self):
for command in self.commands:
self._WriteCommand(command)
def _WriteFileAndLine(self, command):
self.out_file.write('// %s:%d\n' % (self.source_filename, command['line']))
def _WriteCommand(self, command):
command_funcs = {
'module': self._WriteModuleCommand,
'action': self._WriteActionCommand,
'register': self._WriteRegisterCommand,
'assert_malformed': self._WriteAssertModuleCommand,
'assert_invalid': self._WriteAssertModuleCommand,
'assert_unlinkable': self._WriteAssertModuleCommand,
'assert_uninstantiable': self._WriteAssertModuleCommand,
'assert_return': self._WriteAssertReturnCommand,
'assert_trap': self._WriteAssertActionCommand,
'assert_exhaustion': self._WriteAssertActionCommand,
}
func = command_funcs.get(command['type'])
if func is None:
raise Error('Unexpected type: %s' % command['type'])
self._WriteFileAndLine(command)
func(command)
self.out_file.write('\n')
def _ModuleIdxName(self):
return '$%d' % self.module_idx
def _WriteModuleCommand(self, command):
self.module_idx += 1
idx_name = self._ModuleIdxName()
self.out_file.write('let %s = instance("%s");\n' %
(idx_name, self._Module(command['filename'])))
if 'name' in command:
self.out_file.write('let %s = %s;\n' % (command['name'], idx_name))
def _WriteActionCommand(self, command):
self.out_file.write('%s;\n' % self._Action(command['action']))
def _WriteRegisterCommand(self, command):
self.out_file.write('register("%s", %s)\n' % (
command['as'], command.get('name', self._ModuleIdxName())))
def _WriteAssertModuleCommand(self, command):
# Don't bother writing out text modules; they can't be parsed by JS.
if command['module_type'] == 'binary':
self.out_file.write('%s("%s");\n' % (command['type'],
self._Module(command['filename'])))
def _WriteAssertReturnCommand(self, command):
expected = command['expected']
if len(expected) == 1:
self.out_file.write('assert_return(() => %s, %s);\n' %
(self._Action(command['action']),
self._ConstantList(expected)))
elif len(expected) == 0:
self._WriteAssertActionCommand(command)
else:
raise Error('Unexpected result with multiple values: %s' % expected)
def _WriteAssertActionCommand(self, command):
self.out_file.write('%s(() => %s);\n' % (command['type'],
self._Action(command['action'])))
def _Module(self, filename):
with open(os.path.join(self.base_dir, filename), 'rb') as wasm_file:
return ''.join('\\x%02x' % c for c in bytearray(wasm_file.read()))
def _Constant(self, const):
assert IsValidJSConstant(const), 'Invalid JS const: %s' % const
type_ = const['type']
value = const['value']
if type_ in ('f32', 'f64') and value in ('nan:canonical', 'nan:arithmetic'):
return value
if type_ == 'i32':
return I32ToJS(int(value))
elif type_ == 'f32':
return F32ToJS(int(value))
elif type_ == 'f64':
return F64ToJS(int(value))
else:
assert False
def _ConstantList(self, consts):
return ', '.join(self._Constant(const) for const in consts)
def _Action(self, action):
type_ = action['type']
module = action.get('module', self._ModuleIdxName())
field = EscapeJSString(action['field'])
if type_ == 'invoke':
args = '[%s]' % self._ConstantList(action.get('args', []))
return 'call(%s, "%s", %s)' % (module, field, args)
elif type_ == 'get':
return 'get(%s, "%s")' % (module, field)
else:
raise Error('Unexpected action type: %s' % type_)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-o', '--output', metavar='PATH', help='output file.')
parser.add_argument('-P', '--prefix', metavar='PATH', help='prefix file.',
default=os.path.join(SCRIPT_DIR, 'gen-spec-prefix.js'))
parser.add_argument('--bindir', metavar='PATH',
default=find_exe.GetDefaultPath(),
help='directory to search for all executables.')
parser.add_argument('--temp-dir', metavar='PATH',
help='set the directory that temporary wasm/wat'
' files are written to.')
parser.add_argument('--no-error-cmdline',
help='don\'t display the subprocess\'s commandline when'
' an error occurs', dest='error_cmdline',
action='store_false')
parser.add_argument('-p', '--print-cmd',
help='print the commands that are run.',
action='store_true')
parser.add_argument('file', help='spec json file.')
options = parser.parse_args(args)
wat2wasm = Executable(
find_exe.GetWat2WasmExecutable(options.bindir),
error_cmdline=options.error_cmdline)
wasm2wat = Executable(
find_exe.GetWasm2WatExecutable(options.bindir),
error_cmdline=options.error_cmdline)
wat2wasm.verbose = options.print_cmd
wasm2wat.verbose = options.print_cmd
with open(options.file) as json_file:
json_dir = os.path.dirname(options.file)
spec_json = json.load(json_file)
all_commands = spec_json['commands']
# modules is a list of pairs: [(module_command, [assert_command, ...]), ...]
modules = CollectInvalidModuleCommands(all_commands)
with utils.TempDirectory(options.temp_dir, 'gen-spec-js-') as temp_dir:
extender = ModuleExtender(wat2wasm, wasm2wat, temp_dir)
for module_command, assert_commands in modules:
if assert_commands:
wasm_path = os.path.join(json_dir, module_command['filename'])
new_module_filename = extender.Extend(wasm_path, assert_commands)
module_command['filename'] = new_module_filename
output = io.StringIO()
if options.prefix:
with open(options.prefix) as prefix_file:
output.write(prefix_file.read())
output.write('\n')
JSWriter(json_dir, spec_json, output).Write()
if options.output:
out_file = open(options.output, 'w')
else:
out_file = sys.stdout
try:
out_file.write(output.getvalue())
finally:
if options.output:
  out_file.close()
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except Error as e:
sys.stderr.write(str(e) + '\n')
sys.exit(1)
|
|
from copy import deepcopy
from dpath.exceptions import InvalidGlob, InvalidKeyName, PathNotFound
from dpath import options
from fnmatch import fnmatchcase
def kvs(node):
'''
Return a (key, value) iterator for the node.
kvs(node) -> (generator -> (key, value))
'''
try:
return iter(node.items())
except AttributeError:
try:
return zip(range(len(node)), node)
except TypeError:
# This can happen when the node is not a leaf (leaf(node) is False),
# but also isn't actually iterable. Instead of treating this as an
# error, we treat the node as if it has no children.
return enumerate([])
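# Illustrative behaviour of kvs() (not part of the original module): dicts yield
# their items, sequences yield (index, element) pairs, and non-iterable,
# non-leaf objects yield nothing.
#
#   >>> list(kvs({'a': 1}))
#   [('a', 1)]
#   >>> list(kvs(['x', 'y']))
#   [(0, 'x'), (1, 'y')]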
def leaf(thing):
'''
Return True if thing is a leaf, otherwise False.
leaf(thing) -> bool
'''
leaves = (bytes, str, int, float, bool, type(None))
return isinstance(thing, leaves)
def leafy(thing):
'''
Same as leaf(thing), but also treats empty sequences and
dictionaries as True.
leafy(thing) -> bool
'''
try:
return leaf(thing) or len(thing) == 0
except TypeError:
# In case thing has no len()
return False
def walk(obj, location=()):
'''
Yield all valid (segments, value) pairs (from a breadth-first
search, right-to-left on sequences).
walk(obj) -> (generator -> (segments, value))
'''
if not leaf(obj):
for k, v in kvs(obj):
length = None
try:
length = len(k)
except:
pass
if length is not None and length == 0 and not options.ALLOW_EMPTY_STRING_KEYS:
raise InvalidKeyName("Empty string keys not allowed without "
"dpath.options.ALLOW_EMPTY_STRING_KEYS=True: "
"{}".format(location + (k,)))
yield ((location + (k,)), v)
for k, v in kvs(obj):
for found in walk(v, location + (k,)):
yield found
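# Illustrative walk() output (not part of the original module); every nested
# path is yielded as a tuple of segments paired with its value:
#
#   >>> data = {'a': {'b': 1}, 'c': [2, 3]}
#   >>> sorted(walk(data))
#   [(('a',), {'b': 1}), (('a', 'b'), 1), (('c',), [2, 3]), (('c', 0), 2), (('c', 1), 3)]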
def get(obj, segments):
'''
Return the value at the path indicated by segments.
get(obj, segments) -> value
'''
current = obj
for (i, segment) in enumerate(segments):
if leaf(current):
raise PathNotFound('Path: {}[{}]'.format(segments, i))
current = current[segment]
return current
def has(obj, segments):
'''
Return True if the path exists in the obj. Otherwise return False.
has(obj, segments) -> bool
'''
try:
get(obj, segments)
return True
except:
return False
def expand(segments):
'''
Yield a tuple of segments for each possible length of segments.
Starting from the shortest length of segments and increasing by 1.
expand(keys) -> (keys[:1], keys[:2], ..., keys)
'''
index = 0
for segment in segments:
index += 1
yield segments[:index]
def types(obj, segments):
'''
For each segment produce a tuple of (segment, type(value)).
types(obj, segments) -> ((segment[0], type0), (segment[1], type1), ...)
'''
result = []
for depth in expand(segments):
result.append((depth[-1], type(get(obj, depth))))
return tuple(result)
def leaves(obj):
'''
Yield all leaves as (segment, value) pairs.
leaves(obj) -> (generator -> (segment, value))
'''
return filter(lambda p: leafy(p[1]), walk(obj))
def int_str(segment):
'''
If the segment is an integer, return the string conversion.
Otherwise return the segment unchanged. The conversion uses 'str'.
int_str(segment) -> str
'''
if isinstance(segment, int):
return str(segment)
return segment
class Star(object):
'''
Used to create a global STAR symbol for tracking stars added when
expanding star-star globs.
'''
pass
STAR = Star()
def match(segments, glob):
'''
Return True if the segments match the given glob, otherwise False.
For the purposes of matching, integers are converted to their string
equivalent (via str(segment)). This conversion happens on both the
segments and the glob. This implies you cannot (with this function)
differentiate a list index 0 from a dictionary key '0'.
Star-star segments are a special case in that they will expand to 0
or more star segments and the type will be coerced to match that of
the segment.
A segment is considered to match a glob if the function
fnmatch.fnmatchcase returns True. If fnmatchcase returns False or
throws an exception the result will be False.
match(segments, glob) -> bool
'''
segments = tuple(segments)
glob = tuple(glob)
path_len = len(segments)
glob_len = len(glob)
# Index of the star-star in the glob.
ss = -1
# The star-star normalized glob ('**' has been removed).
ss_glob = glob
if '**' in glob:
ss = glob.index('**')
if '**' in glob[ss + 1:]:
raise InvalidGlob("Invalid glob. Only one '**' is permitted per glob: {}"
"".format(glob))
# Convert '**' segment into multiple '*' segments such that the
# lengths of the path and glob match. '**' also can collapse and
# result in the removal of 1 segment.
if path_len >= glob_len:
# Path and glob have the same number of stars or the glob
# needs more stars (which we add).
more_stars = (STAR,) * (path_len - glob_len + 1)
ss_glob = glob[:ss] + more_stars + glob[ss + 1:]
elif path_len == glob_len - 1:
# Glob has one more segment than the path. Here we remove
# the '**' segment altogether to match the lengths up.
ss_glob = glob[:ss] + glob[ss + 1:]
# If we were successful in matching up the lengths, then we can
# compare them using fnmatch.
if path_len == len(ss_glob):
for (s, g) in zip(map(int_str, segments), map(int_str, ss_glob)):
# Match the stars we added to the glob to the type of the
# segment itself.
if g is STAR:
if isinstance(s, bytes):
g = b'*'
else:
g = u'*'
# Let's see if the glob matches. We will turn any kind of
# exception while attempting to match into a False for the
# match.
try:
if not fnmatchcase(s, g):
return False
except:
return False
# All of the segments matched so we have a complete match.
return True
# Otherwise the lengths aren't the same and we couldn't have a
# match.
return False
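# Illustrative glob matches (not part of the original module): integer segments
# compare as strings and a single '**' absorbs any number of segments.
#
#   >>> match(('a', 'b', 'c'), ('a', '**'))
#   True
#   >>> match((0, 'x'), ('0', '*'))
#   True
#   >>> match(('a', 'b'), ('a', 'z*'))
#   False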
def extend(thing, index, value=None):
'''
Extend a sequence like thing such that it contains at least index +
1 many elements. The extension values will be None (default).
extend(thing, int) -> [thing..., None, ...]
'''
try:
expansion = (type(thing)())
# Using this rather than the multiply notation in order to support a
# wider variety of sequence like things.
extra = (index + 1) - len(thing)
for i in range(extra):
expansion += [value]
thing.extend(expansion)
except TypeError:
# We attempted to extend something that doesn't support it. In
# this case we assume thing is actually more like a dictionary
# and doesn't need to be extended.
pass
return thing
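# Illustrative extend() calls (not part of the original module):
#
#   >>> extend([], 2)
#   [None, None, None]
#   >>> extend({'a': 1}, 5)   # non-extendable things are returned unchanged
#   {'a': 1}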
def __default_creator__(current, segments, i, hints=()):
'''
Create missing path components. If the segment is an int, then it will
create a list. Otherwise a dictionary is created.
set(obj, segments, value) -> obj
'''
segment = segments[i]
length = len(segments)
if isinstance(segment, int):
extend(current, segment)
# Infer the type from the hints provided.
if i < len(hints):
current[segment] = hints[i][1]()
else:
# Peek at the next segment to determine whether we should be
# creating an array or a dictionary for it to access.
if i + 1 < length:
segment_next = segments[i + 1]
else:
segment_next = None
if isinstance(segment_next, int):
current[segment] = []
else:
current[segment] = {}
def set(obj, segments, value, creator=__default_creator__, hints=()):
'''
Set the value in obj at the place indicated by segments. If creator is not
None (default __default_creator__), then call the creator function to
create any missing path components.
set(obj, segments, value) -> obj
'''
current = obj
length = len(segments)
# For everything except the last value, walk down the path and
# create if creator is set.
for (i, segment) in enumerate(segments[:-1]):
try:
# Optimistically try to get the next value. This makes the
# code agnostic to whether current is a list or a dict.
# Unfortunately, for our use, 'x in thing' for lists checks
# values, not keys whereas dicts check keys.
current[segment]
except:
if creator is not None:
creator(current, segments, i, hints=hints)
else:
raise
current = current[segment]
if i != length - 1 and leaf(current):
raise PathNotFound('Path: {}[{}]'.format(segments, i))
if isinstance(segments[-1], int):
extend(current, segments[-1])
current[segments[-1]] = value
return obj
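# Illustrative set() call (not part of the original module): the default creator
# builds a list for integer segments and a dict otherwise.
#
#   >>> set({}, ('a', 0, 'b'), 1)
#   {'a': [{'b': 1}]}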
def fold(obj, f, acc):
'''
Walk obj applying f to each path and returning accumulator acc.
The function f will be called, for each result in walk(obj):
f(obj, (segments, value), acc)
If the function f returns False (exactly False), then processing
will stop. Otherwise processing will continue with the next value
retrieved from the walk.
fold(obj, f(obj, (segments, value), acc) -> bool, acc) -> acc
'''
for pair in walk(obj):
if f(obj, pair, acc) is False:
break
return acc
def foldm(obj, f, acc):
'''
Same as fold(), but permits mutating obj.
This requires all paths in walk(obj) to be loaded into memory
(whereas fold does not).
foldm(obj, f(obj, (segments, value), acc) -> bool, acc) -> acc
'''
pairs = tuple(walk(obj))
for pair in pairs:
(segments, value) = pair
if f(obj, pair, acc) is False:
break
return acc
def view(obj, glob):
'''
Return a view of the object where the glob matches. A view retains
the same form as the obj, but is limited to only the paths that
matched. Views are new objects (a deepcopy of the matching values).
view(obj, glob) -> obj'
'''
def f(obj, pair, result):
(segments, value) = pair
if match(segments, glob):
if not has(result, segments):
set(result, segments, deepcopy(value), hints=types(obj, segments))
return fold(obj, f, type(obj)())
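# Illustrative view() call (not part of the original module): the result is a
# pruned deep copy containing only the paths matched by the glob.
#
#   >>> view({'a': {'b': 1, 'c': 2}}, ('a', 'b'))
#   {'a': {'b': 1}}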
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Big Datext Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from luigi.target import Target
class MongoTarget(Target):
""" Target for a resource in MongoDB """
def __init__(self, mongo_client, index, collection):
"""
:param mongo_client: MongoClient instance
:type mongo_client: MongoClient
:param index: database index
:type index: str
:param collection: index collection
:type collection: str
"""
self._mongo_client = mongo_client
self._index = index
self._collection = collection
def get_collection(self):
"""
Return targeted mongo collection to query on
"""
db_mongo = self._mongo_client[self._index]
return db_mongo[self._collection]
def get_index(self):
"""
Return targeted mongo index to query on
"""
return self._mongo_client[self._index]
class MongoCellTarget(MongoTarget):
""" Target for a ressource in a specific field from a MongoDB document """
def __init__(self, mongo_client, index, collection, document_id, path):
"""
:param document_id: targeted mongo document
:type document_id: str
:param path: full path to the targeted field in the mongo document
:type path: str
"""
super(MongoCellTarget, self).__init__(mongo_client, index, collection)
self._document_id = document_id
self._path = path
def exists(self):
"""
Test if target has been run
Target is considered run if the targeted field exists
"""
return self.read() is not None
def read(self):
"""
Read the target value
Use $project aggregate operator in order to support nested objects
"""
result = self.get_collection().aggregate([
{'$match': {'_id': self._document_id}},
{'$project': {'_value': '$' + self._path, '_id': False}}
])
for doc in result:
if '_value' not in doc:
break
return doc['_value']
def write(self, value):
"""
Write value to the target
"""
self.get_collection().update_one(
{'_id': self._document_id},
{'$set': {self._path: value}},
upsert=True
)
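# A minimal usage sketch (illustrative only; assumes a reachable MongoDB and the
# pymongo package -- the database, collection and field names below are made up):
#
#   from pymongo import MongoClient
#
#   client = MongoClient('localhost', 27017)
#   target = MongoCellTarget(client, 'my_db', 'my_collection', 'doc-1', 'stats.count')
#   target.write(42)   # upserts {'stats': {'count': 42}} into document 'doc-1'
#   target.exists()    # True once the targeted field is present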
class MongoRangeTarget(MongoTarget):
""" Target for a level 0 field in a range of documents """
def __init__(self, mongo_client, index, collection, document_ids, field):
"""
:param document_ids: targeted mongo documents
:type document_ids: list of str
:param field: targeted field in documents
:type field: str
"""
super(MongoRangeTarget, self).__init__(mongo_client, index, collection)
self._document_ids = document_ids
self._field = field
def exists(self):
"""
Test if target has been run
Target is considered run if the targeted field exists in ALL documents
"""
return not self.get_empty_ids()
def read(self):
"""
Read the targets value
"""
cursor = self.get_collection().find(
{
'_id': {'$in': self._document_ids},
self._field: {'$exists': True}
},
{self._field: True}
)
return {doc['_id']: doc[self._field] for doc in cursor}
def write(self, values):
"""
Write values to the targeted documents
Values need to be a dict of the form {document_id: value}
"""
# Insert only for docs targeted by the target
filtered = {_id: value for _id, value in values.items() if _id in self._document_ids}
if not filtered:
return
bulk = self.get_collection().initialize_ordered_bulk_op()
for _id, value in filtered.items():
bulk.find({'_id': _id}).upsert() \
.update_one({'$set': {self._field: value}})
bulk.execute()
def get_empty_ids(self):
"""
Get the ids of documents missing the targeted field
"""
cursor = self.get_collection().find(
{
'_id': {'$in': self._document_ids},
self._field: {'$exists': True}
},
{'_id': True}
)
return set(self._document_ids) - set([doc['_id'] for doc in cursor])
class MongoCollectionTarget(MongoTarget):
""" Target for existing collection """
def __init__(self, mongo_client, index, collection):
super(MongoCollectionTarget, self).__init__(mongo_client, index, collection)
def exists(self):
"""
Test if target has been run
Target is considered run if the targeted collection exists in the database
"""
return self.read()
def read(self):
"""
Return if the target collection exists in the database
"""
return self._collection in self.get_index().collection_names()
class MongoCountTarget(MongoTarget):
""" Target for documents count """
def __init__(self, mongo_client, index, collection, target_count):
"""
:param target_count: Value of the desired item count in the target
:type target_count: int
"""
super(MongoCountTarget, self).__init__(mongo_client, index, collection)
self._target_count = target_count
def exists(self):
"""
Test if the target has been run
Target is considered run if the number of items in the target matches the value of self._target_count
"""
return self.read() == self._target_count
def read(self):
"""
Return the number of documents in the target collection, or -1 if the collection doesn't exist
Using the aggregate method to avoid inaccurate count if using a sharded cluster
https://docs.mongodb.com/manual/reference/method/db.collection.count/#behavior
"""
return next(self.get_collection().aggregate([{'$group': {'_id': None, 'count': {'$sum': 1}}}]), {}).get('count', -1)
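# Usage sketch (illustrative names; assumes a pymongo MongoClient named `client`):
#
#   target = MongoCountTarget(client, 'my_db', 'my_collection', 100)
#   target.exists()   # True only when the collection holds exactly 100 documents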
|
|
from django.db.models import Case, CharField, Q, Value, When
from guardian.shortcuts import get_objects_for_user
from rest_framework.exceptions import ValidationError
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import NotAuthenticated, NotFound
from api.base import permissions as base_permissions
from api.base.exceptions import InvalidFilterValue, InvalidFilterOperator, Conflict
from api.base.filters import PreprintFilterMixin, ListFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.pagination import MaxSizePagination, IncreasedPageSizePagination
from api.base.utils import get_object_or_error, get_user_auth, is_truthy
from api.licenses.views import LicenseList
from api.collections.permissions import CanSubmitToCollectionOrPublic
from api.collections.serializers import CollectedMetaSerializer, CollectedMetaCreateSerializer
from api.requests.serializers import PreprintRequestSerializer
from api.preprints.permissions import PreprintPublishedOrAdmin
from api.preprints.serializers import PreprintSerializer
from api.providers.permissions import CanAddModerator, CanDeleteModerator, CanUpdateModerator, CanSetUpProvider, GROUP_FORMAT, GroupHelper, MustBeModerator, PERMISSIONS
from api.providers.serializers import CollectionProviderSerializer, PreprintProviderSerializer, ModeratorSerializer
from api.taxonomies.serializers import TaxonomySerializer
from api.taxonomies.utils import optimize_subject_query
from framework.auth.oauth_scopes import CoreScopes
from osf.models import AbstractNode, CollectionProvider, CollectedGuidMetadata, NodeLicense, OSFUser, Subject, PreprintRequest, PreprintProvider, WhitelistedSHAREPreprintProvider
from osf.utils.workflows import RequestTypes
class GenericProviderList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
required_write_scopes = [CoreScopes.NULL]
pagination_class = MaxSizePagination
ordering = ('name', )
def get_default_queryset(self):
return self.model_class.objects.all()
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
class CollectionProviderList(GenericProviderList):
model_class = CollectionProvider
serializer_class = CollectionProviderSerializer
view_category = 'collection-providers'
view_name = 'collection-providers-list'
class PreprintProviderList(GenericProviderList):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprint_provider_list).
"""
model_class = PreprintProvider
serializer_class = PreprintProviderSerializer
view_category = 'preprint-providers'
view_name = 'preprint-providers-list'
def get_renderer_context(self):
context = super(PreprintProviderList, self).get_renderer_context()
context['meta'] = {
'whitelisted_providers': WhitelistedSHAREPreprintProvider.objects.all().values_list('provider_name', flat=True)
}
return context
def build_query_from_field(self, field_name, operation):
if field_name == 'permissions':
if operation['op'] != 'eq':
raise InvalidFilterOperator(value=operation['op'], valid_operators=['eq'])
auth = get_user_auth(self.request)
auth_user = getattr(auth, 'user', None)
if not auth_user:
raise NotAuthenticated()
value = operation['value'].lstrip('[').rstrip(']')
permissions = [v.strip() for v in value.split(',')]
if any(p not in PERMISSIONS for p in permissions):
valid_permissions = ', '.join(PERMISSIONS.keys())
raise InvalidFilterValue('Invalid permission! Valid values are: {}'.format(valid_permissions))
return Q(id__in=get_objects_for_user(auth_user, permissions, PreprintProvider, any_perm=True))
return super(PreprintProviderList, self).build_query_from_field(field_name, operation)
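# Illustrative request for the 'permissions' filter handled above (the path and
# permission names here are placeholders; valid values are the keys of PERMISSIONS):
#
#   GET /v2/preprint_providers/?filter[permissions]=[view_submissions,other_perm]
#
# The bracketed value is stripped, split on commas, validated against PERMISSIONS,
# and turned into a guardian object-permission filter for the requesting user.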
class GenericProviderDetail(JSONAPIBaseView, generics.RetrieveAPIView):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
required_write_scopes = [CoreScopes.PROVIDERS_WRITE]
def get_object(self):
provider = get_object_or_error(self.model_class, self.kwargs['provider_id'], self.request, display_name=self.model_class.__name__)
self.check_object_permissions(self.request, provider)
return provider
class CollectionProviderDetail(GenericProviderDetail):
model_class = CollectionProvider
serializer_class = CollectionProviderSerializer
view_category = 'collection-providers'
view_name = 'collection-provider-detail'
class PreprintProviderDetail(GenericProviderDetail, generics.UpdateAPIView):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprint_provider_detail).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
CanSetUpProvider,
)
model_class = PreprintProvider
serializer_class = PreprintProviderSerializer
view_category = 'preprint-providers'
view_name = 'preprint-provider-detail'
def perform_update(self, serializer):
if serializer.instance.is_reviewed:
raise Conflict('Reviews settings may be set only once. Contact [email protected] if you need to update them.')
super(PreprintProviderDetail, self).perform_update(serializer)
class GenericProviderTaxonomies(JSONAPIBaseView, generics.ListAPIView):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
required_write_scopes = [CoreScopes.NULL]
serializer_class = TaxonomySerializer
pagination_class = IncreasedPageSizePagination
view_name = 'taxonomy-list'
ordering = ('-id',)
def is_valid_subject(self, allows_children, allowed_parents, sub):
# TODO: Delete this when all PreprintProviders have a mapping
if sub._id in allowed_parents:
return True
if sub.parent:
if sub.parent._id in allows_children:
return True
if sub.parent.parent:
if sub.parent.parent._id in allows_children:
return True
return False
def get_queryset(self):
parent = self.request.query_params.get('filter[parents]', None) or self.request.query_params.get('filter[parent]', None)
provider = get_object_or_error(self._model_class, self.kwargs['provider_id'], self.request, display_name=self._model_class.__name__)
if parent:
if parent == 'null':
return provider.top_level_subjects
if provider.subjects.exists():
return optimize_subject_query(provider.subjects.filter(parent___id=parent))
else:
# TODO: Delete this when all PreprintProviders have a mapping
# Calculate this here to only have to do it once.
allowed_parents = [id_ for sublist in provider.subjects_acceptable for id_ in sublist[0]]
allows_children = [subs[0][-1] for subs in provider.subjects_acceptable if subs[1]]
return [sub for sub in optimize_subject_query(Subject.objects.filter(parent___id=parent)) if provider.subjects_acceptable == [] or self.is_valid_subject(allows_children=allows_children, allowed_parents=allowed_parents, sub=sub)]
return optimize_subject_query(provider.all_subjects)
class CollectionProviderTaxonomies(GenericProviderTaxonomies):
view_category = 'collection-providers'
_model_class = CollectionProvider # Not actually the model being serialized, privatize to avoid issues
class PreprintProviderTaxonomies(GenericProviderTaxonomies):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprint_provider_taxonomies_list).
"""
view_category = 'preprint-providers'
_model_class = PreprintProvider # Not actually the model being serialized, privatize to avoid issues
class GenericProviderHighlightedSubjectList(JSONAPIBaseView, generics.ListAPIView):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_name = 'highlighted-taxonomy-list'
required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
required_write_scopes = [CoreScopes.NULL]
serializer_class = TaxonomySerializer
def get_queryset(self):
provider = get_object_or_error(self._model_class, self.kwargs['provider_id'], self.request, display_name=self._model_class.__name__)
return optimize_subject_query(Subject.objects.filter(id__in=[s.id for s in provider.highlighted_subjects]).order_by('text'))
class CollectionProviderHighlightedSubjectList(GenericProviderHighlightedSubjectList):
view_category = 'collection-providers'
_model_class = CollectionProvider
class PreprintProviderHighlightedSubjectList(GenericProviderHighlightedSubjectList):
view_category = 'preprint-providers'
_model_class = PreprintProvider
class GenericProviderLicenseList(LicenseList):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprint_provider_licenses_list)
"""
ordering = () # TODO: should be ordered once the frontend for selecting default licenses no longer relies on order
def get_default_queryset(self):
return NodeLicense.objects.preprint_licenses()
def get_queryset(self):
provider = get_object_or_error(self._model_class, self.kwargs['provider_id'], self.request, display_name=self._model_class.__name__)
if not provider.licenses_acceptable.count():
if not provider.default_license:
return super(GenericProviderLicenseList, self).get_queryset()
return [provider.default_license] + [license for license in super(GenericProviderLicenseList, self).get_queryset() if license != provider.default_license]
if not provider.default_license:
return provider.licenses_acceptable.get_queryset()
return [provider.default_license] + [license for license in provider.licenses_acceptable.all() if license != provider.default_license]
class CollectionProviderLicenseList(GenericProviderLicenseList):
view_category = 'collection-providers'
_model_class = CollectionProvider
class PreprintProviderLicenseList(GenericProviderLicenseList):
view_category = 'preprint-providers'
_model_class = PreprintProvider
class PreprintProviderPreprintList(JSONAPIBaseView, generics.ListAPIView, PreprintFilterMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprint_providers_preprints_list).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PreprintPublishedOrAdmin,
)
ordering = ('-created', )
serializer_class = PreprintSerializer
model_class = AbstractNode
required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
required_write_scopes = [CoreScopes.NULL]
view_category = 'preprint-providers'
view_name = 'preprints-list'
def get_default_queryset(self):
auth = get_user_auth(self.request)
auth_user = getattr(auth, 'user', None)
provider = get_object_or_error(PreprintProvider, self.kwargs['provider_id'], self.request, display_name='PreprintProvider')
# Permissions on the list objects are handled by the query
return self.preprints_queryset(provider.preprint_services.all(), auth_user)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
# overrides APIView
def get_renderer_context(self):
context = super(PreprintProviderPreprintList, self).get_renderer_context()
show_counts = is_truthy(self.request.query_params.get('meta[reviews_state_counts]', False))
if show_counts:
# TODO don't duplicate the above
auth = get_user_auth(self.request)
auth_user = getattr(auth, 'user', None)
provider = get_object_or_error(PreprintProvider, self.kwargs['provider_id'], self.request, display_name='PreprintProvider')
if auth_user and auth_user.has_perm('view_submissions', provider):
context['meta'] = {
'reviews_state_counts': provider.get_reviewable_state_counts(),
}
return context
class CollectionProviderSubmissionList(JSONAPIBaseView, generics.ListCreateAPIView, ListFilterMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
CanSubmitToCollectionOrPublic,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.COLLECTED_META_READ]
required_write_scopes = [CoreScopes.COLLECTED_META_WRITE]
model_class = CollectedGuidMetadata
serializer_class = CollectedMetaSerializer
view_category = 'collected-metadata'
view_name = 'provider-collected-metadata-list'
def get_serializer_class(self):
if self.request.method == 'POST':
return CollectedMetaCreateSerializer
else:
return CollectedMetaSerializer
def get_default_queryset(self):
provider = get_object_or_error(CollectionProvider, self.kwargs['provider_id'], self.request, display_name='CollectionProvider')
if provider and provider.primary_collection:
return provider.primary_collection.collectedguidmetadata_set.all()
return CollectedGuidMetadata.objects.none()
def get_queryset(self):
return self.get_queryset_from_request()
def perform_create(self, serializer):
user = self.request.user
provider = get_object_or_error(CollectionProvider, self.kwargs['provider_id'], self.request, display_name='CollectionProvider')
if provider and provider.primary_collection:
return serializer.save(creator=user, collection=provider.primary_collection)
raise ValidationError('Provider {} has no primary collection to submit to.'.format(provider.name))
class PreprintProviderWithdrawRequestList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin):
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.TokenHasScope,
MustBeModerator,
)
view_category = 'requests'
view_name = 'provider-withdrawal-request-list'
required_read_scopes = [CoreScopes.PREPRINT_REQUESTS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = PreprintRequestSerializer
def get_provider(self):
# used in perms class
return get_object_or_error(PreprintProvider, self.kwargs['provider_id'], self.request, display_name='PreprintProvider')
def get_default_queryset(self):
return PreprintRequest.objects.filter(request_type=RequestTypes.WITHDRAWAL.value, target__provider_id=self.get_provider().id)
def get_queryset(self):
return self.get_queryset_from_request()
class ModeratorMixin(object):
model_class = OSFUser
def get_provider(self):
return get_object_or_error(PreprintProvider, self.kwargs['provider_id'], self.request, display_name='PreprintProvider')
def get_serializer_context(self, *args, **kwargs):
ctx = super(ModeratorMixin, self).get_serializer_context(*args, **kwargs)
ctx.update({'provider': self.get_provider()})
return ctx
class PreprintProviderModeratorsList(ModeratorMixin, JSONAPIBaseView, generics.ListCreateAPIView, ListFilterMixin):
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.TokenHasScope,
MustBeModerator,
CanAddModerator,
)
view_category = 'moderators'
view_name = 'provider-moderator-list'
required_read_scopes = [CoreScopes.MODERATORS_READ]
required_write_scopes = [CoreScopes.MODERATORS_WRITE]
serializer_class = ModeratorSerializer
def get_default_queryset(self):
provider = self.get_provider()
group_helper = GroupHelper(provider)
admin_group = group_helper.get_group('admin')
mod_group = group_helper.get_group('moderator')
return (admin_group.user_set.all() | mod_group.user_set.all()).annotate(permission_group=Case(
When(groups=admin_group, then=Value('admin')),
default=Value('moderator'),
output_field=CharField()
)).order_by('fullname')
def get_queryset(self):
return self.get_queryset_from_request()
class PreprintProviderModeratorsDetail(ModeratorMixin, JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView):
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.TokenHasScope,
MustBeModerator,
CanUpdateModerator,
CanDeleteModerator,
)
view_category = 'moderators'
view_name = 'provider-moderator-detail'
required_read_scopes = [CoreScopes.MODERATORS_READ]
required_write_scopes = [CoreScopes.MODERATORS_WRITE]
serializer_class = ModeratorSerializer
def get_object(self):
provider = self.get_provider()
user = get_object_or_error(OSFUser, self.kwargs['moderator_id'], self.request, display_name='OSFUser')
try:
perm_group = user.groups.filter(name__contains=GROUP_FORMAT.format(provider_id=provider._id, group='')).order_by('name').first().name.split('_')[-1]
except AttributeError:
# Group doesn't exist -- user is not a moderator
raise NotFound
setattr(user, 'permission_group', perm_group)
return user
def perform_destroy(self, instance):
try:
self.get_provider().remove_from_group(instance, instance.permission_group)
except ValueError as e:
raise ValidationError(str(e))
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import (
InvalidElementStateException,
InvalidSelectorException,
NoSuchElementException,
NoSuchWindowException,
WebDriverException)
class TestDriverElementFinding(object):
# By.id positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Id(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.ID, "linkId")
assert element.get_attribute("id") == "linkId"
def test_Should_Be_Able_To_Find_ASingle_Element_By_Numeric_Id(self, driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.ID, "2")
assert element.get_attribute("id") == "2"
def test_should_be_able_to_find_an_element_with_css_escape(self, driver, pages):
pages.load("idElements.html")
element = driver.find_element(By.ID, "with.dots")
assert element.get_attribute("id") == "with.dots"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Id(self, driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.ID, "test_id")
assert len(elements) == 2
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Numeric_Id(self, driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.ID, "2")
assert len(elements) == 8
# By.id negative
def test_Should_Not_Be_Able_To_Locate_By_Id_ASingle_Element_That_Does_Not_Exist(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "non_Existent_Button")
def test_Should_Not_Be_Able_To_Locate_By_Id_Multiple_Elements_That_Do_Not_Exist(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "non_Existent_Button")
assert len(elements) == 0
@pytest.mark.xfail_phantomjs(raises=NoSuchWindowException)
def test_Finding_ASingle_Element_By_Empty_Id_Should_Throw(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "")
@pytest.mark.xfail_phantomjs(raises=NoSuchElementException)
def test_Finding_Multiple_Elements_By_Empty_Id_Should_Return_Empty_List(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Id_With_Space_Should_Throw(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "nonexistent button")
def test_Finding_Multiple_Elements_By_Id_With_Space_Should_Return_Empty_List(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "nonexistent button")
assert len(elements) == 0
# By.name positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Name(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "checky")
assert element.get_attribute("value") == "furrfu"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Name(self, driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.NAME, "checky")
assert len(elements) > 1
def test_Should_Be_Able_To_Find_An_Element_That_Does_Not_Support_The_Name_Property(self, driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.NAME, "div1")
assert element.get_attribute("name") == "div1"
# By.name negative
def test_Should_Not_Be_Able_To_Locate_By_Name_ASingle_Element_That_Does_Not_Exist(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "non_Existent_Button")
def test_Should_Not_Be_Able_To_Locate_By_Name_Multiple_Elements_That_Do_Not_Exist(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "non_Existent_Button")
assert len(elements) == 0
@pytest.mark.xfail_phantomjs(raises=NoSuchWindowException)
def test_Finding_ASingle_Element_By_Empty_Name_Should_Throw(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "")
@pytest.mark.xfail_phantomjs(raises=NoSuchElementException)
def test_Finding_Multiple_Elements_By_Empty_Name_Should_Return_Empty_List(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Name_With_Space_Should_Throw(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "nonexistent button")
def test_Finding_Multiple_Elements_By_Name_With_Space_Should_Return_Empty_List(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "nonexistent button")
assert len(elements) == 0
# By.tag_Name positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Tag_Name(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.TAG_NAME, "input")
assert element.tag_name.lower() == "input"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Tag_Name(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "input")
assert len(elements) > 1
# By.tag_Name negative
def test_Should_Not_Be_Able_To_Locate_By_Tag_Name_ASingle_Element_That_Does_Not_Exist(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "non_Existent_Button")
def test_Should_Not_Be_Able_To_Locate_By_Tag_Name_Multiple_Elements_That_Do_Not_Exist(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "non_Existent_Button")
assert len(elements) == 0
@pytest.mark.xfail_chrome(
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1541')
@pytest.mark.xfail_phantomjs
def test_Finding_ASingle_Element_By_Empty_Tag_Name_Should_Throw(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.TAG_NAME, "")
@pytest.mark.xfail_chrome(
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1541')
@pytest.mark.xfail_phantomjs
def test_Finding_Multiple_Elements_By_Empty_Tag_Name_Should_Throw(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.TAG_NAME, "")
def test_Finding_ASingle_Element_By_Tag_Name_With_Space_Should_Throw(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "nonexistent button")
def test_Finding_Multiple_Elements_By_Tag_Name_With_Space_Should_Return_Empty_List(self, driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "nonexistent button")
assert len(elements) == 0
# By.class_Name positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Class(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "extraDiv")
assert "Another div starts here." in element.text
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Class_Name(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CLASS_NAME, "nameC")
assert len(elements) > 1
def test_Should_Find_Element_By_Class_When_It_Is_The_First_Name_Among_Many(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameA")
assert element.text == "An H2 title"
def test_Should_Find_Element_By_Class_When_It_Is_The_Last_Name_Among_Many(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameC")
assert element.text == "An H2 title"
def test_Should_Find_Element_By_Class_When_It_Is_In_The_Middle_Among_Many(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameBnoise")
assert element.text == "An H2 title"
def test_Should_Find_Element_By_Class_When_Its_Name_Is_Surrounded_By_Whitespace(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "spaceAround")
assert element.text == "Spaced out"
def test_Should_Find_Elements_By_Class_When_Its_Name_Is_Surrounded_By_Whitespace(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CLASS_NAME, "spaceAround")
assert len(elements) == 1
assert elements[0].text == "Spaced out"
# By.class_Name negative
def test_Should_Not_Find_Element_By_Class_When_The_Name_Queried_Is_Shorter_Than_Candidate_Name(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "name_B")
@pytest.mark.xfail_phantomjs(raises=NoSuchWindowException)
def test_Finding_ASingle_Element_By_Empty_Class_Name_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "")
@pytest.mark.xfail_phantomjs(raises=NoSuchElementException)
def test_Finding_Multiple_Elements_By_Empty_Class_Name_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CLASS_NAME, "")
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def test_Finding_ASingle_Element_By_Compound_Class_Name_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "a b")
@pytest.mark.xfail_phantomjs(raises=InvalidElementStateException)
def test_Finding_ASingle_Element_By_Invalid_Class_Name_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "!@#$%^&*")
@pytest.mark.xfail_phantomjs(raises=InvalidElementStateException)
def test_Finding_Multiple_Elements_By_Invalid_Class_Name_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CLASS_NAME, "!@#$%^&*")
# By.xpath positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_XPath(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.XPATH, "//h1")
assert element.text == "XHTML Might Be The Future"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_XPath(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.XPATH, "//div")
assert len(elements) == 13
def test_Should_Be_Able_To_Find_Many_Elements_Repeatedly_By_XPath(self, driver, pages):
pages.load("xhtmlTest.html")
xpath = "//node()[contains(@id,'id')]"
assert len(driver.find_elements(By.XPATH, xpath)) == 3
xpath = "//node()[contains(@id,'nope')]"
assert len(driver.find_elements(By.XPATH, xpath)) == 0
def test_Should_Be_Able_To_Identify_Elements_By_Class(self, driver, pages):
pages.load("xhtmlTest.html")
header = driver.find_element(By.XPATH, "//h1[@class='header']")
assert header.text == "XHTML Might Be The Future"
def test_Should_Be_Able_To_Find_An_Element_By_XPath_With_Multiple_Attributes(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element(
By.XPATH, "//form[@name='optional']/input[@type='submit' and @value='Click!']")
assert element.tag_name.lower() == "input"
assert element.get_attribute("value") == "Click!"
def test_Finding_ALink_By_Xpath_Should_Locate_An_Element_With_The_Given_Text(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.XPATH, "//a[text()='click me']")
assert element.text == "click me"
def test_Finding_ALink_By_Xpath_Using_Contains_Keyword_Should_Work(self, driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.XPATH, "//a[contains(.,'hello world')]")
assert "hello world" in element.text
@pytest.mark.xfail_chrome(raises=InvalidSelectorException)
@pytest.mark.xfail_firefox(raises=InvalidSelectorException)
@pytest.mark.xfail_marionette(raises=WebDriverException)
@pytest.mark.xfail_phantomjs(raises=InvalidSelectorException)
@pytest.mark.xfail_safari(raises=NoSuchElementException)
def test_Should_Be_Able_To_Find_Element_By_XPath_With_Namespace(self, driver, pages):
pages.load("svgPage.html")
element = driver.find_element(By.XPATH, "//svg:svg//svg:text")
assert element.text == "Test Chart"
def test_Should_Be_Able_To_Find_Element_By_XPath_In_Xml_Document(self, driver, pages):
pages.load("simple.xml")
element = driver.find_element(By.XPATH, "//foo")
assert "baz" in element.text
# By.xpath negative
def test_Should_Throw_An_Exception_When_There_Is_No_Link_To_Click(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.XPATH, "//a[@id='Not here']")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Driver_Find_Element(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Driver_Find_Elements(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Element_Find_Element(self, driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_element(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Element_Find_Elements(self, driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_elements(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Driver_Find_Element(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.XPATH, "count(//input)")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Driver_Find_Elements(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.XPATH, "count(//input)")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Element_Find_Element(self, driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_element(By.XPATH, "count(//input)")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Element_Find_Elements(self, driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_elements(By.XPATH, "count(//input)")
# By.css_Selector positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Css_Selector(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CSS_SELECTOR, "div.content")
assert element.tag_name.lower() == "div"
assert element.get_attribute("class") == "content"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Css_Selector(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, "p")
assert len(elements) > 1
def test_Should_Be_Able_To_Find_ASingle_Element_By_Compound_Css_Selector(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CSS_SELECTOR, "div.extraDiv, div.content")
assert element.tag_name.lower() == "div"
assert element.get_attribute("class") == "content"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Compound_Css_Selector(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, "div.extraDiv, div.content")
assert len(elements) > 1
assert elements[0].get_attribute("class") == "content"
assert elements[1].get_attribute("class") == "extraDiv"
def test_Should_Be_Able_To_Find_An_Element_By_Boolean_Attribute_Using_Css_Selector(self, driver, pages):
pages.load("locators_tests/boolean_attribute_selected.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected='selected']")
assert element.get_attribute("value") == "two"
def test_Should_Be_Able_To_Find_An_Element_By_Boolean_Attribute_Using_Short_Css_Selector(self, driver, pages):
pages.load("locators_tests/boolean_attribute_selected.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
assert element.get_attribute("value") == "two"
def test_Should_Be_Able_To_Find_An_Element_By_Boolean_Attribute_Using_Short_Css_Selector_On_Html4Page(self, driver, pages):
pages.load("locators_tests/boolean_attribute_selected_html4.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
assert element.get_attribute("value") == "two"
# By.css_Selector negative
def test_Should_Not_Find_Element_By_Css_Selector_When_There_Is_No_Such_Element(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, ".there-is-no-such-class")
def test_Should_Not_Find_Elements_By_Css_Selector_When_There_Is_No_Such_Element(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, ".there-is-no-such-class")
assert len(elements) == 0
@pytest.mark.xfail_phantomjs(raises=NoSuchWindowException)
def test_Finding_ASingle_Element_By_Empty_Css_Selector_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, "")
def test_Finding_Multiple_Elements_By_Empty_Css_Selector_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CSS_SELECTOR, "")
@pytest.mark.xfail_phantomjs(raises=InvalidElementStateException)
def test_Finding_ASingle_Element_By_Invalid_Css_Selector_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, "//a/b/c[@id='1']")
@pytest.mark.xfail_phantomjs(raises=InvalidElementStateException)
def test_Finding_Multiple_Elements_By_Invalid_Css_Selector_Should_Throw(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CSS_SELECTOR, "//a/b/c[@id='1']")
# By.link_Text positive
def test_Should_Be_Able_To_Find_ALink_By_Text(self, driver, pages):
pages.load("xhtmlTest.html")
link = driver.find_element(By.LINK_TEXT, "click me")
assert link.text == "click me"
def test_Should_Be_Able_To_Find_Multiple_Links_By_Text(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "click me")
assert len(elements) == 2
def test_Should_Find_Element_By_Link_Text_Containing_Equals_Sign(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.LINK_TEXT, "Link=equalssign")
assert element.get_attribute("id") == "linkWithEqualsSign"
def test_Should_Find_Multiple_Elements_By_Link_Text_Containing_Equals_Sign(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "Link=equalssign")
assert 1 == len(elements)
assert elements[0].get_attribute("id") == "linkWithEqualsSign"
def test_finds_By_Link_Text_On_Xhtml_Page(self, driver, pages):
pages.load("actualXhtmlPage.xhtml")
link_Text = "Foo"
element = driver.find_element(By.LINK_TEXT, link_Text)
assert element.text == link_Text
def test_Link_With_Formatting_Tags(self, driver, pages):
pages.load("simpleTest.html")
elem = driver.find_element(By.ID, "links")
res = elem.find_element(By.PARTIAL_LINK_TEXT, "link with formatting tags")
assert res.text == "link with formatting tags"
def test_Driver_Can_Get_Link_By_Link_Test_Ignoring_Trailing_Whitespace(self, driver, pages):
pages.load("simpleTest.html")
link = driver.find_element(By.LINK_TEXT, "link with trailing space")
assert link.get_attribute("id") == "linkWithTrailingSpace"
assert link.text == "link with trailing space"
# By.link_Text negative
def test_Should_Not_Be_Able_To_Locate_By_Link_Text_ASingle_Element_That_Does_Not_Exist(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.LINK_TEXT, "Not here either")
def test_Should_Not_Be_Able_To_Locate_By_Link_Text_Multiple_Elements_That_Do_Not_Exist(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "Not here either")
assert len(elements) == 0
# By.partial_Link_Text positive
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Partial_Link_Text(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "ick me")
assert len(elements) == 2
def test_Should_Be_Able_To_Find_ASingle_Element_By_Partial_Link_Text(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.PARTIAL_LINK_TEXT, "anon")
assert "anon" in element.text
def test_Should_Find_Element_By_Partial_Link_Text_Containing_Equals_Sign(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.PARTIAL_LINK_TEXT, "Link=")
assert element.get_attribute("id") == "linkWithEqualsSign"
def test_Should_Find_Multiple_Elements_By_Partial_Link_Text_Containing_Equals_Sign(self, driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "Link=")
assert len(elements) == 1
assert elements[0].get_attribute("id") == "linkWithEqualsSign"
# Misc tests
def test_Driver_Should_Be_Able_To_Find_Elements_After_Loading_More_Than_One_Page_At_ATime(self, driver, pages):
pages.load("formPage.html")
pages.load("xhtmlTest.html")
link = driver.find_element(By.LINK_TEXT, "click me")
assert link.text == "click me"
# You don't want to ask why this is here
def test_When_Finding_By_Name_Should_Not_Return_By_Id(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "id-name1")
assert element.get_attribute("value") == "name"
element = driver.find_element(By.ID, "id-name1")
assert element.get_attribute("value") == "id"
element = driver.find_element(By.NAME, "id-name2")
assert element.get_attribute("value") == "name"
element = driver.find_element(By.ID, "id-name2")
assert element.get_attribute("value") == "id"
def test_Should_Be_Able_To_Find_AHidden_Elements_By_Name(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "hidden")
assert element.get_attribute("name") == "hidden"
def test_Should_Not_Be_Able_To_Find_An_Element_On_ABlank_Page(self, driver, pages):
driver.get("about:blank")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "a")
|
|
# Time: O(nlogn)
# Space: O(n)
# On an infinite number line (x-axis), we drop given squares in the order they are given.
#
# The i-th square dropped (positions[i] = (left, side_length)) is a square
# with the left-most point being positions[i][0] and sidelength positions[i][1].
#
# The square is dropped with the bottom edge parallel to the number line,
# and from a higher height than all currently landed squares.
# We wait for each square to stick before dropping the next.
#
# The squares are infinitely sticky on their bottom edge, and will remain fixed
# to any positive length surface they touch (either the number line or another square).
# Squares dropped adjacent to each other will not stick together prematurely.
#
# Return a list ans of heights. Each height ans[i] represents the current highest height
# of any square we have dropped, after dropping squares represented by positions[0], positions[1], ..., positions[i].
#
# Example 1:
# Input: [[1, 2], [2, 3], [6, 1]]
# Output: [2, 5, 5]
# Explanation:
#
# After the first drop of
# positions[0] = [1, 2]:
# _aa
# _aa
# -------
# The maximum height of any square is 2.
#
# After the second drop of
# positions[1] = [2, 3]:
# __aaa
# __aaa
# __aaa
# _aa__
# _aa__
# --------------
# The maximum height of any square is 5.
# The larger square stays on top of the smaller square despite where its center
# of gravity is, because squares are infinitely sticky on their bottom edge.
#
# After the third drop of
# positions[2] = [6, 1]:
# __aaa
# __aaa
# __aaa
# _aa
# _aa___a
# --------------
# The maximum height of any square is still 5.
#
# Thus, we return an answer of
# [2, 5, 5]
# .
#
# Example 2:
# Input: [[100, 100], [200, 100]]
# Output: [100, 100]
# Explanation: Adjacent squares don't get stuck prematurely - only their bottom edge can stick to surfaces.
# Note:
#
# 1 <= positions.length <= 1000.
# 1 <= positions[i][0] <= 10^8.
# 1 <= positions[i][1] <= 10^6.
# Time: O(nlogn) ~ O(n^2)
# Space: O(n)
import bisect
class Solution(object):
def fallingSquares(self, positions):
result = []
pos = [-1]
heights = [0]
maxH = 0
for left, side in positions:
l = bisect.bisect_right(pos, left)
r = bisect.bisect_left(pos, left+side)
high = max(heights[l-1:r] or [0]) + side
pos[l:r] = [left, left+side] # Time: O(n)
heights[l:r] = [high, heights[r-1]] # Time: O(n)
maxH = max(maxH, high)
result.append(maxH)
return result
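# Example run of the bisect-based solution (values from Example 1 above):
#
#   Solution().fallingSquares([[1, 2], [2, 3], [6, 1]])  # -> [2, 5, 5]
#
# 'pos' keeps the sorted interval boundaries and heights[i] is the height of the
# interval starting at pos[i], so each drop costs a pair of binary searches plus
# an O(n) slice replacement.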
class SegmentTree(object):
def __init__(self, N, update_fn, query_fn):
self.N = N
self.H = 1
while (1 << self.H) < N:
self.H += 1
self.update_fn = update_fn
self.query_fn = query_fn
self.tree = [0] * (2 * N)
self.lazy = [0] * N
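    # Layout note (descriptive comment added for clarity): `tree` is a binary
    # max-tree stored in an array with the leaves at indices [N, 2N); `lazy[x]`
    # carries a pending max-update for internal node x and is either pushed to
    # the children on demand (__push) or folded in while walking up (__pull).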
def __apply(self, x, val):
self.tree[x] = self.update_fn(self.tree[x], val)
if x < self.N:
self.lazy[x] = self.update_fn(self.lazy[x], val)
def __pull(self, x):
while x > 1:
x /= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2 + 1])
self.tree[x] = self.update_fn(self.tree[x], self.lazy[x])
def __push(self, x):
for h in xrange(self.H, 0, -1):
y = x >> h
if self.lazy[y]:
self.__apply(y*2, self.lazy[y])
self.__apply(y*2 + 1, self.lazy[y])
self.lazy[y] = 0
def update(self, L, R, h):
L += self.N
R += self.N
L0, R0 = L, R
while L <= R:
if L & 1:
self.__apply(L, h)
L += 1
if R & 1 == 0:
self.__apply(R, h)
R -= 1
L /= 2; R /= 2
self.__pull(L0)
self.__pull(R0)
def query(self, L, R):
L += self.N
R += self.N
self.__push(L); self.__push(R)
result = 0
while L <= R:
if L & 1:
result = self.query_fn(result, self.tree[L])
L += 1
if R & 1 == 0:
result = self.query_fn(result, self.tree[R])
R -= 1
L /= 2; R /= 2
return result
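# Usage note: Solution2 below compresses each square's left/right endpoints into
# leaf indices of this tree, then performs one range-max query and one range-max
# update per square; with the lazy array both operations are O(logn), which is
# what keeps that solution at O(nlogn) overall.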
# Time: O(nlogn)
# Space: O(n)
# Segment Tree solution.
class Solution2(object):
def fallingSquares(self, positions):
index = set()
for left, size in positions:
index.add(left);
index.add(left+size-1)
index = sorted(list(index))
tree = SegmentTree(len(index), max, max)
max_height = 0
result = []
for left, size in positions:
L, R = bisect.bisect_left(index, left), bisect.bisect_left(index, left+size-1)
h = tree.query(L, R) + size
tree.update(L, R, h)
max_height = max(max_height, h)
result.append(max_height)
return result
# Time: O(n * sqrt(n))
# Space: O(n)
class Solution3(object):
def fallingSquares(self, positions):
def query(heights, left, right, B, blocks, blocks_read):
result = 0
while left % B and left <= right:
result = max(result, heights[left], blocks[left//B])
left += 1
while right % B != B-1 and left <= right:
result = max(result, heights[right], blocks[right//B])
right -= 1
while left <= right:
result = max(result, blocks[left//B], blocks_read[left//B])
left += B
return result
def update(heights, left, right, B, blocks, blocks_read, h):
while left % B and left <= right:
heights[left] = max(heights[left], h)
blocks_read[left//B] = max(blocks_read[left//B], h)
left += 1
while right % B != B-1 and left <= right:
heights[right] = max(heights[right], h)
blocks_read[right//B] = max(blocks_read[right//B], h)
right -= 1
while left <= right:
blocks[left//B] = max(blocks[left//B], h)
left += B
index = set()
for left, size in positions:
index.add(left);
index.add(left+size-1)
index = sorted(list(index))
W = len(index)
B = int(W**.5)
heights = [0] * W
blocks = [0] * (B+2)
blocks_read = [0] * (B+2)
max_height = 0
result = []
for left, size in positions:
L, R = bisect.bisect_left(index, left), bisect.bisect_left(index, left+size-1)
h = query(heights, L, R, B, blocks, blocks_read) + size
update(heights, L, R, B, blocks, blocks_read, h)
max_height = max(max_height, h)
result.append(max_height)
return result
# Time: O(n^2)
# Space: O(n)
class Solution4(object):
def fallingSquares(self, positions):
"""
:type positions: List[List[int]]
:rtype: List[int]
"""
heights = [0] * len(positions)
for i in xrange(len(positions)):
left_i, size_i = positions[i]
right_i = left_i + size_i
heights[i] += size_i
for j in xrange(i+1, len(positions)):
left_j, size_j = positions[j]
right_j = left_j + size_j
if left_j < right_i and left_i < right_j: # intersect
heights[j] = max(heights[j], heights[i])
result = []
for height in heights:
result.append(max(result[-1], height) if result else height)
return result
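# Minimal self-check (added for illustration, not part of the original
# solutions): it replays the two documented examples against each of the
# implementations above, so the expected outputs come straight from the problem
# statement. Run it under Python 2, matching the xrange usage in this file.
if __name__ == "__main__":
    examples = [
        ([[1, 2], [2, 3], [6, 1]], [2, 5, 5]),
        ([[100, 100], [200, 100]], [100, 100]),
    ]
    for solver in (Solution(), Solution2(), Solution3(), Solution4()):
        for positions, expected in examples:
            assert solver.fallingSquares(positions) == expected
    print("all fallingSquares examples passed")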
|
|
import pygame, sys, os
from pygame.locals import *
if not pygame.font: print 'Warning, fonts disabled'
if not pygame.mixer: print 'Warning, sound disabled'
def loadimage(name, colorkey=None):
fullname = os.path.join(name)
try:
image = pygame.image.load(fullname)
except pygame.error, message:
print 'Cannot load image:', fullname
raise SystemExit, message
image = image.convert()
if colorkey is not None:
        if colorkey == -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
return image
class paddle(pygame.sprite.Sprite):
def __init__(self):
global background
screen = pygame.display.get_surface()
background = pygame.Surface(screen.get_size(), pygame.SWSURFACE).convert()
pygame.sprite.Sprite.__init__(self)
self.image = loadimage(os.path.join("data", "paddle.bmp"), -1)
self.rect = self.image.get_rect()
self.rect.midtop = background.get_width()/2, background.get_height()-35
def update(self):
background = pygame.display.get_surface()
if self.rect.left < 0:
self.rect.left = 0
elif self.rect.right > background.get_width():
self.rect.right = background.get_width()
if self.rect.colliderect(Ball.rect.inflate(0,abs(Ball.y_velocity))):
if Ball.x_velocity >= 13 and Ball.rect.right > Paddle.rect.centerx:
print "Max Ball X Velocity cap reached"
elif Ball.x_velocity <= -13 and Ball.rect.right < Paddle.rect.centerx:
print "Max Ball X Velocity cap reached"
elif Ball.x_velocity > 0:
if Ball.rect.right < Paddle.rect.centerx:
Ball.x_velocity = Ball.x_velocity - 2
else:
Ball.x_velocity = Ball.x_velocity + 2
else:
if Ball.rect.right < Paddle.rect.centerx:
Ball.x_velocity = Ball.x_velocity - 2
else:
Ball.x_velocity = Ball.x_velocity + 2
Ball.y_velocity = -abs(Ball.y_velocity)
if Ball.rect.bottom > Paddle.rect.centery:
Ball.rect.bottom = self.rect.top
class ball(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = loadimage(os.path.join("data", "ball.bmp"), -1)
self.rect = self.image.get_rect()
screen = pygame.display.get_surface()
self.rect.midtop = background.get_width()/2, background.get_height()-70
self.x_velocity = 5
self.y_velocity = -20
def update(self):
self.rect = self.rect.move(self.x_velocity, self.y_velocity)
if self.rect.right >= background.get_width():
self.x_velocity = -abs(self.x_velocity)
elif self.rect.left < 0:
self.x_velocity = abs(self.x_velocity)
elif self.rect.top-abs(self.y_velocity) < 24:
self.y_velocity = abs(self.y_velocity)
elif self.rect.bottom > background.get_height()+50:
global lives
self.rect.midtop = background.get_width()/2, background.get_height()-70
lives = lives - 1
reset()
class brick(pygame.sprite.Sprite):
def __init__(self, color=1):
pygame.sprite.Sprite.__init__(self)
self.image = loadimage(os.path.join("data", "brick_"+str(color)+".bmp"))
self.value = int(color)
self.rect = self.image.get_rect()
def update(self):
if self.rect.colliderect(Ball.rect):
global score
self.value = self.value - 1
if Ball.rect.right-abs(Ball.x_velocity)-1 < self.rect.left:
Ball.x_velocity = -abs(Ball.x_velocity)
elif Ball.rect.left+abs(Ball.x_velocity)+1 > self.rect.right:
Ball.x_velocity = abs(Ball.x_velocity)
elif Ball.rect.bottom < self.rect.centery:
Ball.y_velocity = -abs(Ball.y_velocity)
else:
Ball.y_velocity = abs(Ball.y_velocity)
score = score + 100
if score >= goalscore:
global levelon
levelon = levelon + 1
if levelon <= 5:
setlevel(levelon)
reset()
screen.blit(background, (0, 0))
pygame.display.update()
else:
win()
else:
rendertop()
if self.value < 1 :
allsprites.remove(self)
def rendertop():
if pygame.font:
liv = rendertext("Lives: %d", lives)
livpos = liv.get_rect(left=5,top=5)
lvl = rendertext("Level %d", levelon)
lvlpos = lvl.get_rect(centerx=background.get_width()/2,top=5)
scor = rendertext("Score: %d", score)
scorpos = scor.get_rect(right=background.get_width()-5,top=5)
background.fill((100,100,100), toparea)
background.blit(liv, livpos)
background.blit(lvl, lvlpos)
background.blit(scor, scorpos)
screen.blit(background, (0, 0))
pygame.display.update(toparea)
def rendertext(text, stringvar=None, size=24, color=(255,255,255)):
if pygame.font:
font = pygame.font.Font(None, size)
if stringvar == None:
rend = font.render(text, 1, color)
else:
rend = font.render(text % (stringvar), 1, color)
return rend
def reset():
global spacepressed
global bottomarea
spacepressed = "False"
rendertop()
if lives <= 0:
lose()
else:
Paddle.rect.centerx = background.get_width()/2
Ball.rect.midtop = background.get_width()/2, background.get_height()-70
Ball.y_velocity = -abs(Ball.y_velocity)
Ball.x_velocity = 5
pygame.display.update(bottomarea)
def lose():
global playing
playing = "False"
background.fill((0, 0, 0))
allsprites.empty()
if pygame.font:
font = pygame.font.Font(None, 105)
txt = font.render("YOU LOSE", 1, (255, 0, 0))
pos = txt.get_rect(centerx=background.get_width()/2,centery=background.get_height()/2)
background.fill((0,0,0), pos)
background.blit(txt, pos)
screen.blit(background, (0, 0))
def win():
global playing
playing = "False"
background.fill((0, 0, 0))
screen.blit(background, (0, 0))
allsprites.empty()
if pygame.font:
font = pygame.font.Font(None, 105)
txt = font.render("YOU WIN", 1, (0, 255, 0))
pos = txt.get_rect(centerx=background.get_width()/2,centery=background.get_height()/2)
background.fill((0,0,0), pos)
background.blit(txt, pos)
def setlevel(levelnum):
global allsprites
global bricklist
global goalscore
bricknum = 0
brickx = -40
bricky = 30
bricklist = []
f = file(os.path.join("levels","level"+str(levelnum)+".txt"))
level = "".join(f.readlines()).replace("\n","")
bricks = level.split(" ")
f.close()
for brickval in bricks:
bricknum += 1
goalscore += 100 * int(brickval)
if brickx > 575:
brickx = 70
bricky += 40
else:
brickx += 110
temp = brick(brickval)
temp.rect.topleft = brickx, bricky
bricklist.append(temp)
allsprites.add(bricklist)
def main():
global screen
global background
global spacepressed
global score
global goalscore
global lives
global playing
global pause
global Paddle
global Ball
global bricklist
global levelon
global allsprites
global toparea
global bouncewall
global bouncepaddle
global bottomarea
screen = pygame.display.set_mode((800, 600), pygame.SWSURFACE|pygame.DOUBLEBUF)
background = pygame.Surface(screen.get_size(), pygame.SWSURFACE).convert()
spacepressed = "False"
score = 0
goalscore = score
lives = 3
playing = "True"
clock = pygame.time.Clock()
pause = "False"
Paddle = paddle()
Ball = ball()
bricklist = []
levelon = 1
allsprites = pygame.sprite.RenderUpdates((Paddle, Ball))
setlevel(levelon)
toparea = pygame.Rect(0, 0, background.get_width(), 24)
bottomarea = pygame.Rect(0, Paddle.rect.top, background.get_width(), Paddle.rect.bottom)
background.fill((0, 0, 0))
rendertop()
screen.blit(background, (0, 0))
pausetxt = rendertext("Paused",None,72)
pausetxtpos = pausetxt.get_rect(centerx=background.get_width()/2,centery=background.get_height()/2)
musicplay = "True"
badevents = [ACTIVEEVENT, JOYAXISMOTION, JOYBALLMOTION, JOYHATMOTION, JOYBUTTONDOWN ,JOYBUTTONUP, \
VIDEORESIZE, SYSWMEVENT, NUMEVENTS, KEYUP, MOUSEMOTION, MOUSEBUTTONDOWN, MOUSEBUTTONUP]
#pygame.event.set_blocked(badevents)
allsprites.draw(screen)
pygame.display.flip()
ticks = 0
while 1:
clock.tick(30)
for event in pygame.event.get():
if event.type == QUIT:
return
elif event.type == KEYDOWN:
if playing == "False":
return
if not pause == "True":
if event.key == K_LEFT:
if spacepressed == "False" and Paddle.rect.left > 0:
Ball.x_velocity = 15
Ball.rect = Ball.rect.move(-15,0)
Paddle.rect = Paddle.rect.move(-15, 0)
elif spacepressed == "True":
Paddle.rect = Paddle.rect.move(-15, 0)
elif event.key == K_RIGHT:
if spacepressed == "False" and Paddle.rect.right < background.get_width():
Ball.x_velocity = 15
Ball.rect = Ball.rect.move(15,0)
Paddle.rect = Paddle.rect.move(15, 0)
elif spacepressed == "True":
Paddle.rect = Paddle.rect.move(15, 0)
elif event.key == K_LCTRL:
if spacepressed == "False":
spacepressed = "True"
Ball.x_velocity = 5
elif event.key == K_UP:
if spacepressed == "False":
spacepressed = "True"
Ball.x_velocity = 5
elif event.key == K_p:
if not pause == "True":
pause = "True"
background.blit(pausetxt, pausetxtpos)
screen.blit(background, (0, 0))
pygame.display.update(pausetxtpos)
else:
background.fill((0,0,0), pausetxtpos)
screen.blit(background, (0, 0))
pygame.display.update(pausetxtpos)
pause = "False"
elif event.key == K_F1:
pygame.display.toggle_fullscreen()
elif event.key == K_ESCAPE:
return
if playing == "True":
ticks += 1
fpspos = None
screen.blit(background, (0, 0))
if ticks >= 5:
font = pygame.font.Font(None, 18)
fpstxt = font.render("FPS: %d" % clock.get_fps(), 1, (255, 255, 255))
fpspos = fpstxt.get_rect(centerx=background.get_width()-50,centery=background.get_height()-20)
background.fill((0,0,0), fpspos)
background.blit(fpstxt, fpspos)
ticks = 0
if spacepressed == "True" and pause == "False":
allsprites.update()
allsprites.draw(screen)
dirty_rects = [Ball.rect.inflate(abs(Ball.x_velocity)*2+5,abs(Ball.y_velocity)*2+5), Paddle.rect.inflate(60,10), fpspos] + bricklist
pygame.display.update()
else:
pygame.display.flip()
#pygame.time.delay(30)
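# Entry-point guard (an assumption: the listing defines main() but does not show
# a call to it). pygame.init() is required before the display and font calls in
# main() will work.
if __name__ == '__main__':
    pygame.init()
    main()
    pygame.quit()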
|
|
###
# Copyright (c) 2013-2014, spline
# All rights reserved.
#
#
###
# libs
import os
import re
import tailer
from tbgrep import TracebackGrep
# extra supybot libs.
import supybot.conf as conf
# supybot libs
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('LogTail')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class LogTail(callbacks.Plugin):
"""Add the help for "@plugin help LogTail" here
This should describe *how* to use this plugin."""
threaded = True
#############################
# INTERNAL HELPER FUNCTIONS #
#############################
def _grep(self, pattern, file_obj, ln=False):
"""
grep-like function
"""
l = []
for i, line in enumerate(open(file_obj).readlines()):
if re.match(pattern, line):
if ln:
l.append("{0} {1}".format(i+i, line.rstrip()))
else:
l.append(line.rstrip())
return l
def _listlogs(self):
"""
List the available logs for tailing and display.
"""
# container for out.
l = {}
# Do we have individual log files? (Boolean)
ilf = conf.supybot.log.plugins.individualLogfiles()
# if not os.path.exists
logs = conf.supybot.directories.log()
if not os.path.exists(logs):
self.log.info("_listlogs :: Logs path ({0}) does not exist.".format(logs))
return None
# does individual logs exist?
if ilf:
ilflogs = logs+'/plugins'
if not os.path.exists(ilflogs):
self.log.reply("_listlogs :: ILF path ({0}) does not exist.".format(ilflogs))
# now lets find the logs.
mlf = logs+'/messages.log'
# main log first.
if os.path.isfile(mlf):
l['main'] = mlf
else:
self.log.reply("_listlogs :: main log file ({0}) does not exist.".format(mlf))
return None
# now if we have individual log files, lets add those.
if ilf:
            matchinglogs = [f for f in os.listdir(ilflogs) if os.path.isfile(os.path.join(ilflogs, f)) and re.match(r'^\w+\.log$', f)]
# list with matching. lets add these into the l dict. ex: Logger.log
for i in matchinglogs:
n = i.replace('.log', '').lower() # remove .log and lower to match.
l[n] = ilflogs + '/' + i # path.
# now return.
if len(l) == 0:
self.log.info("_listlogs :: ERROR no logs found.")
return None
else:
return l
def _gS(self, fn):
"""File size wrapper."""
st = os.stat(fn)
num = st.st_size
# pretty.
for x in ['b','KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
###################
# PUBLIC COMMANDS #
###################
def grep(self, irc, msg, args, optlog, optpat):
"""<log> <pattern>
Grep logfile for pattern.
"""
optlog = optlog.lower()
# next, grab our list of logs.
ll = self._listlogs()
if not ll:
irc.reply("ERROR: No logs found to display.")
return
else: # found logs. verify it works.
if optlog not in ll: # we didn't find. display a list.
irc.reply("ERROR: '{0}' is not a valid log. These are: {1}".format(optlog, " | ".join([i for i in ll.keys()])))
return
# now find.
g = self._grep(optpat, ll[optlog], ln=False)
# we get a list back.
if len(g) == 0: # no matches.
irc.reply("Sorry, I found no matches in the {0} logfile for '{1}'".format(optlog, optpat))
else: # matches.
irc.reply(g)
grep = wrap(grep, [('checkCapability', 'owner'), ('somethingWithoutSpaces'), ('text')])
def tbgrep(self, irc, msg, args, optlist, optlog):
"""[--options] <logfile>
Display tracebacks from a specific logfile.
"""
# first, lower optlog to match.
optlog = optlog.lower()
# next handle optlist.
lines, showlast = 10, False # defaults.
if optlist:
for (k, v) in optlist:
if k == 'last':
showlast = True
if k == 'n':
if v > 50:
irc.reply("Sorry, I won't display more than 50 lines.")
elif v < 1:
irc.reply("Sorry, I need a positive integer here.")
else: # under 50 so lets go.
lines = v
# next, grab our list of logs.
ll = self._listlogs()
if not ll:
irc.reply("ERROR: No logs found to display.")
return
else: # found logs. verify it works.
if optlog not in ll: # we didn't find. display a list.
irc.reply("ERROR: '{0}' is not a valid log. These are: {1}".format(optlog, " | ".join([i for i in ll.keys()])))
return
# we're here if things worked.
tbo = []
# now lets use TracebackGrep.
extractor = TracebackGrep()
for line in file(ll[optlog]):
tb = extractor.process(line)
if tb: # if we find any, add.
tbo.append(tb)
#irc.reply(type(tb))
# now lets output if we find anything.
if len(tbo) == 0:
irc.reply("I did not find any Tracebacks in {0}'s logfile.".format(optlog))
else: # found some. how to handle.
if showlast:
irc.reply("{0}".format(tbo[-1]))
else:
for each in tbo[-(lines):]:
irc.reply(each)
tbgrep = wrap(tbgrep, [('checkCapability', 'owner'), getopts({'last':'', 'n':('int') }), ('somethingWithoutSpaces')])
def tail(self, irc, msg, args, optlist, optlog):
"""[--singleline --n=# of lines] <logfile>
        Tails the last 10 lines from a logfile. Execute the listlogs command for a list of available logs.
Ex: main
"""
# first, lower optlog to match.
optlog = optlog.lower()
# next handle optlist.
singleline, lines = False, 10 # defaults.
if optlist:
for (k, v) in optlist:
if k == 'singleline':
singleline = True
if k == 'n':
if v > 50:
irc.reply("Sorry, I won't display more than 50 lines.")
elif v < 1:
irc.reply("Sorry, I need a positive integer here.")
else: # under 50 so lets go.
lines = v
# next, grab our list of logs.
ll = self._listlogs()
if not ll:
irc.reply("ERROR: No logs found to display.")
return
else: # found logs. verify it works.
if optlog not in ll: # we didn't find. display a list.
irc.reply("ERROR: '{0}' is not a valid log. These are: {1}".format(optlog, " | ".join([i for i in ll.keys()])))
return
# we're here if things worked.
# lets display the last 10 lines.
lf = tailer.tail(open(ll[optlog]), lines)
# lets display.
if singleline:
irc.reply("{0} :: {1}".format(optlog, " ".join([i for i in lf])))
else: # one per line.
for l in lf:
irc.reply("{0}".format(l))
tail = wrap(tail, [('checkCapability', 'owner'), getopts({'singleline': '', 'n':('int') }), ('somethingWithoutSpaces')])
def rmlog(self, irc, msg, args, optlog):
"""<log>
Deletes logfile.
"""
# first, lower optlog to match.
optlog = optlog.lower()
# next, grab our list of logs.
ll = self._listlogs()
if not ll:
irc.reply("ERROR: No logs found to display.")
return
else: # found logs. verify it works.
if optlog not in ll: # we didn't find. display a list.
irc.reply("ERROR: '{0}' is not a valid log. These are: {1}".format(optlog, " | ".join([i for i in ll.keys()])))
return
# now lets delete the log.
        fn = ll[optlog] # filename
fs = self._gS(fn) # filesize.
# now lets try to delete.
try:
os.remove(fn)
irc.reply("I have successfully removed {0} ({1})".format(fn, fs))
except Exception as e:
irc.reply("ERROR trying to delete {0} :: {1}".format(fn, e))
rmlog = wrap(rmlog, [('checkCapability', 'owner'), ('somethingWithoutSpaces')])
def listlogs(self, irc, msg, args, optlist):
"""[--size]
List log files available. Use --size to display how big.
"""
# setup input args.
s = False
if optlist:
for (k, v) in optlist:
if k == "size":
s = True
# grab and go.
ll = self._listlogs()
if not ll:
irc.reply("ERROR: No logs found to display.")
else:
for (k, v) in ll.items():
if s: # filesize.
irc.reply("{0} :: {1} :: {2}".format(k, self._gS(v), v))
else: # no size.
irc.reply("{0} :: {1}".format(k, v))
listlogs = wrap(listlogs, [('checkCapability', 'owner'), getopts({'size': ''})])
Class = LogTail
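# Illustrative IRC usage, assuming the plugin is loaded, the caller has the
# owner capability, and '@' is the configured command prefix (the log names
# shown are placeholders; 'main' is always present when messages.log exists):
#   @listlogs --size
#   @tail --n=20 main
#   @grep main ^ERROR
#   @tbgrep --last main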
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
# orm/persistence.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import _state_mapper, state_str, _attr_as_key
from ..sql import expression
from . import loading
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_insert, states_to_update = _organize_states_for_save(
base_mapper,
states,
uowtransaction)
cached_connections = _cached_connection_dict(base_mapper)
for table, mapper in base_mapper._sorted_tables.items():
insert = _collect_insert_commands(base_mapper, uowtransaction,
table, states_to_insert)
update = _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update)
if update:
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
if insert:
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = _organize_states_for_post_update(
base_mapper,
states, uowtransaction)
for table, mapper in base_mapper._sorted_tables.items():
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
post_update_cols)
if update:
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(
base_mapper,
states,
uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
states_to_insert = []
states_to_update = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if not has_identity and not row_switch:
states_to_insert.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
else:
states_to_update.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return list(_connections_for_states(base_mapper, uowtransaction,
states))
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
states_to_delete = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
states_to_delete.append((state, dict_, mapper,
bool(state.key), connection))
return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
insert = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
has_all_pks = True
has_all_defaults = True
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(None)
params[col.key] = val
else:
# pull straight from the dict for
# pending objects
prop = mapper._columntoproperty[col]
value = state_dict.get(prop.key, None)
if value is None:
if col in pks:
has_all_pks = False
elif col.default is None and \
col.server_default is None:
params[col.key] = value
elif col.server_default is not None and \
mapper.base_mapper.eager_defaults:
has_all_defaults = False
elif isinstance(value, sql.ClauseElement):
value_params[col] = value
else:
params[col.key] = value
insert.append((state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults))
return insert
def _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
update = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
hasdata = hasnull = False
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col:
params[col._label] = \
mapper._get_committed_state_attr_by_column(
row_switch or state,
row_switch and row_switch.dict
or state_dict,
col)
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict, attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
params[col.key] = history.added[0]
hasdata = True
else:
if mapper.version_id_generator is not False:
val = mapper.version_id_generator(params[col._label])
params[col.key] = val
# HACK: check for history, in case the
# history is only
# in a different table than the one
# where the version_id_col is.
for prop in mapper._columntoproperty.values():
history = (
state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE))
if history.added:
hasdata = True
else:
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
if isinstance(history.added[0],
sql.ClauseElement):
value_params[col] = history.added[0]
else:
value = history.added[0]
params[col.key] = value
if col in pks:
if history.deleted and \
not row_switch:
# if passive_updates and sync detected
# this was a pk->pk sync, use the new
# value to locate the row, since the
# DB would already have set this
if ("pk_cascaded", state, col) in \
uowtransaction.attributes:
value = history.added[0]
params[col._label] = value
else:
# use the old value to
# locate the row
value = history.deleted[0]
params[col._label] = value
hasdata = True
else:
# row switch logic can reach us here
# remove the pk from the update params
# so the update doesn't
# attempt to include the pk in the
# update statement
del params[col.key]
value = history.added[0]
params[col._label] = value
if value is None:
hasnull = True
else:
hasdata = True
elif col in pks:
value = state.manager[prop.key].impl.get(
state, state_dict)
if value is None:
hasnull = True
params[col._label] = value
# see #3060. Need to consider an "unchanged" None
# as potentially history for now.
elif row_switch and history.unchanged == [None]:
params[col.key] = None
hasdata = True
if hasdata:
if hasnull:
raise orm_exc.FlushError(
"Can't update table "
"using NULL for primary "
"key value")
update.append((state, state_dict, params, mapper,
connection, value_params))
return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
update = []
for state, state_dict, mapper, connection in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
update.append((state, state_dict, params, mapper,
connection))
return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
delete = util.defaultdict(list)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
if not has_identity or table not in mapper._pks_by_table:
continue
params = {}
delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_committed_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
if mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col):
params[mapper.version_id_col.key] = \
mapper._get_committed_state_attr_by_column(
state, state_dict,
mapper.version_id_col)
return delete
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(
mapper.version_id_col == sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
if mapper.base_mapper.eager_defaults:
stmt = stmt.return_defaults()
elif mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(('update', table), update_stmt)
rows = 0
for state, state_dict, params, mapper, \
connection, value_params in update:
if value_params:
c = connection.execute(
statement.values(value_params),
params)
else:
c = cached_connections[connection].\
execute(statement, params)
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
if connection.dialect.supports_sane_rowcount:
if rows != len(update):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(update), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description,
stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(insert,
lambda rec: (rec[4],
list(rec[2].keys()),
bool(rec[5]),
rec[6], rec[7])
):
if \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
# TODO: would rather say:
# state_dict[prop.key] = pk
mapper_rec._set_state_attr_by_column(
state,
state_dict,
col, pk)
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (rec[4], list(rec[2].keys()))
):
connection = key[0]
multiparams = [params for state, state_dict,
params, mapper, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
for connection, del_objects in delete.items():
statement = base_mapper._memo(('delete', table), delete_stmt)
connection = cached_connections[connection]
expected = len(del_objects)
rows_matched = -1
only_warn = False
if connection.dialect.supports_sane_multi_rowcount:
c = connection.execute(statement, del_objects)
if not need_version_id:
only_warn = True
rows_matched = c.rowcount
elif need_version_id:
if connection.dialect.supports_sane_rowcount:
rows_matched = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows_matched += c.rowcount
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
if base_mapper.confirm_deleted_rows and \
rows_matched > -1 and expected != rows_matched:
if only_warn:
util.warn(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
else:
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert + \
states_to_update:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults:
toload_now.extend(state._unloaded_non_object)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
prop = mapper._columntoproperty[mapper.version_id_col]
if prop.key in state.unloaded:
toload_now.extend([prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.prefetch_cols
postfetch_cols = result.context.postfetch_cols
returning_cols = result.context.returning_cols
if mapper.version_id_col is not None:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
mapper._set_state_attr_by_column(state, dict_, col, row[col])
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
if postfetch_cols:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = None
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
elif not connection:
connection = uowtransaction.transaction.connection(
base_mapper)
mapper = _state_mapper(state)
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
def exec_(self):
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
def _do_pre(self):
query = self.query
self.context = context = query._compile_context()
if len(context.statement.froms) != 1 or \
not isinstance(context.statement.froms[0], schema.Table):
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
else:
self.primary_table = context.statement.froms[0]
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
target_cls = query._mapper_zero().class_
try:
evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
# TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
select_stmt = self.context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values):
super(BulkUpdate, self).__init__(query)
self.query._no_select_modifiers("update")
self.values = values
@classmethod
def factory(cls, query, synchronize_session, values):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values)
def _do_exec(self):
update_stmt = sql.update(self.primary_table,
self.context.whereclause, self.values)
self.result = self.query.session.execute(
update_stmt, params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
self.query._no_select_modifiers("delete")
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(delete_stmt,
params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
for key, value in self.values.items():
key = _attr_as_key(key)
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)
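# Illustrative driver for the BulkUD hierarchy above (this mirrors how
# Query.update() / Query.delete() dispatch into this module; `query`, the
# values dict and the synchronize_session arguments are placeholders):
#
#   update_op = BulkUpdate.factory(query, 'evaluate', {'status': 'archived'})
#   update_op.exec_()   # _do_pre -> _do_pre_synchronize -> _do_exec -> _do_post_synchronize -> _do_post
#
#   delete_op = BulkDelete.factory(query, False)
#   delete_op.exec_()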
|
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test mining and broadcast of larger-than-1MB-blocks
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from decimal import Decimal
CACHE_DIR = "cache_bigblock"
# regression test / testnet fork params:
BASE_VERSION = 0x20000000
FORK_BLOCK_BIT = 0x10000000
FORK_DEADLINE = 1514764800
FORK_GRACE_PERIOD = 60*60*24
# Worst-case: fork happens close to the expiration time
FORK_TIME = FORK_DEADLINE-FORK_GRACE_PERIOD*4
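# With these constants, FORK_TIME = 1514764800 - 4*86400 = 1514419200
# (2017-12-28 00:00:00 UTC), i.e. four grace periods before the 2018-01-01 deadline.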
class BigBlockTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if not os.path.isdir(os.path.join(CACHE_DIR, "node0")):
print("Creating initial chain")
for i in range(4):
initialize_datadir(CACHE_DIR, i) # Overwrite port/rpcport in bitcoin.conf
first_block_time = FORK_TIME - 200 * 10*60
# Node 0 tries to create as-big-as-possible blocks.
# Node 1 creates really small, old-version blocks
# Node 2 creates empty up-version blocks
# Node 3 creates empty, old-version blocks
self.nodes = []
# Use node0 to mine blocks for input splitting
self.nodes.append(start_node(0, CACHE_DIR, ["-blockmaxsize=2000000", "-debug=net",
"-mocktime=%d"%(first_block_time,)]))
self.nodes.append(start_node(1, CACHE_DIR, ["-blockmaxsize=50000", "-debug=net",
"-mocktime=%d"%(first_block_time,),
"-blockversion=%d"%(BASE_VERSION,)]))
self.nodes.append(start_node(2, CACHE_DIR, ["-blockmaxsize=1000",
"-mocktime=%d"%(first_block_time,)]))
self.nodes.append(start_node(3, CACHE_DIR, ["-blockmaxsize=1000",
"-mocktime=%d"%(first_block_time,),
"-blockversion=4"]))
set_node_times(self.nodes, first_block_time)
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 2, 3)
connect_nodes_bi(self.nodes, 3, 0)
self.is_network_split = False
self.sync_all()
# Have node0 and node1 alternate finding blocks
# before the fork time, so it's 50% / 50% vote
block_time = first_block_time
for i in range(0,200):
miner = i%2
set_node_times(self.nodes, block_time)
b1hash = self.nodes[miner].generate(1)[0]
b1 = self.nodes[miner].getblock(b1hash, True)
if miner % 2: assert(not (b1['version'] & FORK_BLOCK_BIT))
else: assert(b1['version'] & FORK_BLOCK_BIT)
assert(self.sync_blocks(self.nodes[0:2]))
block_time = block_time + 10*60
# Generate 1200 addresses
addresses = [ self.nodes[3].getnewaddress() for i in range(0,1200) ]
amount = Decimal("0.00125")
send_to = { }
for address in addresses:
send_to[address] = amount
tx_file = open(os.path.join(CACHE_DIR, "txdata"), "w")
# Create four megabytes worth of transactions ready to be
# mined:
print("Creating 100 40K transactions (4MB)")
for node in range(0,2):
for i in range(0,50):
txid = self.nodes[node].sendmany("", send_to, 1)
txdata = self.nodes[node].getrawtransaction(txid)
tx_file.write(txdata+"\n")
tx_file.close()
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = []
for i in range(4):
os.remove(log_filename(CACHE_DIR, i, "debug.log"))
os.remove(log_filename(CACHE_DIR, i, "db.log"))
os.remove(log_filename(CACHE_DIR, i, "peers.dat"))
os.remove(log_filename(CACHE_DIR, i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join(CACHE_DIR, "node"+str(i))
to_dir = os.path.join(self.options.tmpdir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def sync_blocks(self, rpc_connections, wait=0.1, max_wait=30):
"""
Wait until everybody has the same block count
"""
for i in range(0,max_wait):
if i > 0: time.sleep(wait)
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
return True
return False
def setup_network(self):
self.nodes = []
last_block_time = FORK_TIME - 10*60
self.nodes.append(start_node(0, self.options.tmpdir, ["-blockmaxsize=2000000", "-debug=net",
"-mocktime=%d"%(last_block_time,)]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockmaxsize=50000", "-debug=net",
"-mocktime=%d"%(last_block_time,),
"-blockversion=%d"%(BASE_VERSION,)]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockmaxsize=1000",
"-mocktime=%d"%(last_block_time,)]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-blockmaxsize=1000",
"-mocktime=%d"%(last_block_time,),
"-blockversion=4"]))
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 2, 3)
connect_nodes_bi(self.nodes, 3, 0)
# Populate node0's mempool with cached pre-created transactions:
with open(os.path.join(CACHE_DIR, "txdata"), "r") as f:
for line in f:
self.nodes[0].sendrawtransaction(line.rstrip())
def copy_mempool(self, from_node, to_node):
txids = from_node.getrawmempool()
for txid in txids:
txdata = from_node.getrawtransaction(txid)
to_node.sendrawtransaction(txdata)
def TestMineBig(self, expect_big, expect_version=None):
# Test if node0 will mine big blocks.
b1hash = self.nodes[0].generate(1)[0]
b1 = self.nodes[0].getblock(b1hash, True)
assert(self.sync_blocks(self.nodes))
if expect_version:
assert b1['version'] & FORK_BLOCK_BIT
        elif expect_version is not None:
assert not b1['version'] & FORK_BLOCK_BIT
if expect_big:
assert(b1['size'] > 1000*1000)
# Have node1 mine on top of the block,
# to make sure it goes along with the fork
b2hash = self.nodes[1].generate(1)[0]
b2 = self.nodes[1].getblock(b2hash, True)
assert(b2['previousblockhash'] == b1hash)
assert(self.sync_blocks(self.nodes))
else:
assert(b1['size'] <= 1000*1000)
# Reset chain to before b1hash:
for node in self.nodes:
node.invalidateblock(b1hash)
assert(self.sync_blocks(self.nodes))
def run_test(self):
# nodes 0 and 1 have 50 mature 50-BTC coinbase transactions.
        # Spend them with 50 transactions, each of which has
        # 1,200 outputs (so each transaction is about 41K big).
print("Testing fork conditions")
# Fork is controlled by block timestamp and miner super-majority;
# large blocks may only be created after a supermajority of miners
# produce up-version blocks plus a grace period
# At this point the chain is 200 blocks long
# alternating between version=0x20000000 and version=0x30000000
# blocks.
# Nodes will vote for 2MB until the vote expiration date; votes
        # for 2MB in blocks with times past the expiration date are
# ignored.
# NOTE: the order of these test is important!
# set_node_times must advance time. Local time moving
# backwards causes problems.
# Time starts a little before fork activation time:
set_node_times(self.nodes, FORK_TIME - 100)
# No supermajority yet
self.TestMineBig(expect_big=False, expect_version=True)
# Create a block after the expiration date. This will be rejected
# by the other nodes for being more than 2 hours in the future,
# and will have FORK_BLOCK_BIT cleared.
set_node_times(self.nodes[0:1], FORK_DEADLINE + 100)
b1hash = self.nodes[0].generate(1)[0]
b1 = self.nodes[0].getblock(b1hash, True)
assert(not (b1['version'] & FORK_BLOCK_BIT))
self.nodes[0].invalidateblock(b1hash)
set_node_times(self.nodes[0:1], FORK_TIME - 100)
assert(self.sync_blocks(self.nodes))
# node2 creates empty up-version blocks; creating
# 50 in a row makes 75 of previous 100 up-version
# (which is the -regtest activation condition)
t_delta = FORK_GRACE_PERIOD/50
blocks = []
for i in range(50):
set_node_times(self.nodes, FORK_TIME + t_delta*i - 1)
blocks.append(self.nodes[2].generate(1)[0])
assert(self.sync_blocks(self.nodes))
# Earliest time for a big block is the timestamp of the
# supermajority block plus grace period:
lastblock = self.nodes[0].getblock(blocks[-1], True)
t_fork = lastblock["time"] + FORK_GRACE_PERIOD
self.TestMineBig(expect_big=False, expect_version=True) # Supermajority... but before grace period end
# Test right around the switchover time.
set_node_times(self.nodes, t_fork-1)
self.TestMineBig(expect_big=False, expect_version=True)
        # Note that the nodes' local times are irrelevant; block timestamps
        # are all that count. So node0 will mine a big block with a timestamp
        # in the future from the perspective of the other nodes, but as long
        # as its timestamp is not too far in the future (2 hours) it will be
        # accepted.
self.nodes[0].setmocktime(t_fork)
self.TestMineBig(expect_big=True, expect_version=True)
        # Shut down and then restart node[0]; it should remember the
        # supermajority state and produce a big block.
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=2000000", "-debug=net",
"-mocktime=%d"%(t_fork,)])
self.copy_mempool(self.nodes[1], self.nodes[0])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 3)
self.TestMineBig(expect_big=True, expect_version=True)
# Test re-orgs past the activation block (blocks[-1])
#
# Shutdown node[0] again:
stop_node(self.nodes[0], 0)
# Mine a longer chain with two version=4 blocks:
self.nodes[3].invalidateblock(blocks[-1])
v4blocks = self.nodes[3].generate(2)
assert(self.sync_blocks(self.nodes[1:]))
        # Restart node0; it should re-org onto the longer chain, reset the
        # activation time, and refuse to mine a big block:
self.nodes[0] = start_node(0, self.options.tmpdir, ["-blockmaxsize=2000000", "-debug=net",
"-mocktime=%d"%(t_fork,)])
self.copy_mempool(self.nodes[1], self.nodes[0])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 3)
assert(self.sync_blocks(self.nodes))
self.TestMineBig(expect_big=False, expect_version=True)
# Mine 4 FORK_BLOCK_BIT blocks and set the time past the
# grace period: bigger block OK:
self.nodes[2].generate(4)
assert(self.sync_blocks(self.nodes))
set_node_times(self.nodes, t_fork + FORK_GRACE_PERIOD)
self.TestMineBig(expect_big=True, expect_version=True)
# Finally, mine blocks well after the expiration time and make sure
# bigger blocks are still OK:
set_node_times(self.nodes, FORK_DEADLINE+FORK_GRACE_PERIOD*11)
self.nodes[2].generate(4)
assert(self.sync_blocks(self.nodes))
self.TestMineBig(expect_big=True, expect_version=False)
class BigBlockTest2(BigBlockTest):
def run_test(self):
print("Testing around deadline time")
# 49 blocks just before expiration time:
t_delta = FORK_GRACE_PERIOD/50
pre_expire_blocks = []
for i in range(49):
set_node_times(self.nodes, FORK_DEADLINE - (t_delta*(50-i)))
pre_expire_blocks.append(self.nodes[2].generate(1)[0])
assert(self.sync_blocks(self.nodes))
self.TestMineBig(expect_big=False, expect_version=True)
# Gee, darn: JUST missed the deadline!
set_node_times(self.nodes, FORK_DEADLINE+1)
block_past_expiration = self.nodes[0].generate(1)[0]
# Stuck with small blocks
set_node_times(self.nodes, FORK_DEADLINE+FORK_GRACE_PERIOD*11)
self.nodes[2].generate(4)
assert(self.sync_blocks(self.nodes))
self.TestMineBig(expect_big=False, expect_version=False)
        # If the vote fails, the node should warn about running obsolete code:
info = self.nodes[0].getmininginfo()
assert("obsolete" in info['errors'])
if __name__ == '__main__':
print("Be patient, these tests can take 2 or more minutes to run.")
BigBlockTest().main()
BigBlockTest2().main()
print("Cached test chain and transactions left in %s"%(CACHE_DIR))
print(" (remove that directory if you will not run this test again)")
|
|
#!/usr/bin/env python
#=============================================================================#
# #
# NAME: util_plotTk.py #
# #
# PURPOSE: Plotting functions for the POSSUM pipeline Tk interface. #
# #
# MODIFIED: 19-November-2015 by C. Purcell #
# #
# CONTENTS: #
# #
# xfloat #
# xint #
# filter_range_indx #
# tweakAxFormat #
# format_ticks #
# plot_I_vs_nu_ax #
# plot_PQU_vs_nu_ax #
# plot_rmsIQU_vs_nu_ax #
# plot_pqu_vs_lamsq_ax #
# plot_psi_vs_lamsq_ax #
# plot_q_vs_u_ax #
# plot_RMSF_ax #
# gauss #
# plot_dirtyFDF_ax #
# plot_cleanFDF_ax #
# plot_hist4_ax #
# #
# #-------------------------------------------------------------------------# #
# #
# plotSpecIPQU #
# plotSpecRMS #
# plotPolang #
# plotFracPol #
# plotFracQvsU #
# plotPolsummary #
# plotRMSF #
# plotDirtyFDF #
# plotCleanFDF #
# plotStampI #
# plotStampP #
# plotSctHstQuery #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 Cormac R. Purcell #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import os
import sys
import math as m
import numpy as np
import StringIO
import traceback
import astropy.io.fits as pf
import matplotlib as mpl
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import Polygon
from matplotlib.ticker import FuncFormatter
from matplotlib.figure import Figure
from util_plotFITS import plot_fits_map
from util_PPC import xfloat
from normalize import APLpyNormalize
# Alter the default linewidths etc.
mpl.rcParams['lines.linewidth'] = 1.0
mpl.rcParams['axes.linewidth'] = 0.8
mpl.rcParams['xtick.major.size'] = 8.0
mpl.rcParams['xtick.minor.size'] = 4.0
mpl.rcParams['ytick.major.size'] = 8.0
mpl.rcParams['ytick.minor.size'] = 4.0
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.size'] = 12.0
# Constants
C = 2.99792458e8
#-----------------------------------------------------------------------------#
def xint(x, default=None):
if x is None:
return default
return int(x)
#-----------------------------------------------------------------------------#
def filter_range_indx(a, dataMin=None, dataMax=None, filterNans=False):
"""Return a boolean array [True, ...] where data falls outside of the
range [dataMin <= a <= dataMax]."""
if filterNans:
iNaN = np.zeros_like(a, dtype="bool")
else:
iNaN = a!=a
if dataMin is None:
i1 = np.ones_like(a, dtype="bool")
else:
i1 = a>=dataMin
i1+=iNaN
if dataMax is None:
i2 = np.ones_like(a, dtype="bool")
else:
i2 = a<=dataMax
i2+=iNaN
return ~i1+~i2
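# For example, with a = np.array([1.0, 2.0, 3.0, np.nan]):
#   filter_range_indx(a, dataMin=2.0)                   -> [True, False, False, False]
#   filter_range_indx(a, dataMin=2.0, filterNans=True)  -> [True, False, False, True]
# i.e. out-of-range values are flagged True and NaNs are only flagged when
# filterNans is set.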
#-----------------------------------------------------------------------------#
def tweakAxFormat(ax, pad=10, loc='upper right', linewidth=1, ncol=1,
bbox_to_anchor=(1.00, 1.00), showLeg=True):
# Axis/tic formatting
ax.tick_params(pad=pad)
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markeredgewidth(linewidth)
# Legend formatting
if showLeg:
leg = ax.legend(numpoints=1, loc=loc, shadow=False,
borderaxespad=0.3, ncol=ncol,
bbox_to_anchor=bbox_to_anchor)
for t in leg.get_texts():
t.set_fontsize('small')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.5)
return ax
#-----------------------------------------------------------------------------#
def format_ticks(ax, pad=10, w=1.0):
ax.tick_params(pad=pad)
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markeredgewidth(w)
#-----------------------------------------------------------------------------#
def plot_I_vs_nu_ax(ax, freqArr_Hz, IArr_mJy, dIArr_mJy=None,
freqHirArr_Hz=None, IModArr_mJy=None, axisYright=False,
axisXtop=False):
"""Plot the I spectrum and an optional model."""
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Default to non-high-resolution inputs
if freqHirArr_Hz is None:
freqHirArr_Hz = freqArr_Hz
# Plot I versus frequency
ax.errorbar(x=freqArr_Hz/1e9, y=IArr_mJy, yerr=dIArr_mJy, mfc='none',
ms=4, fmt='D', ecolor='grey', elinewidth=1.0, capsize=2,
label='Stokes I')
if IModArr_mJy is not None:
ax.plot(freqHirArr_Hz/1e9, IModArr_mJy, color='k', lw=0.5,
label='I Model')
#ax.text(0.05, 0.94, 'Stokes I Spectrum', transform=ax.transAxes)
# Formatting
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('Flux Density (mJy)')
# Format tweaks
ax = tweakAxFormat(ax)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------#
def plot_PQU_vs_nu_ax(ax, freqArr_Hz, QArr_mJy, UArr_mJy, dQArr_mJy=None,
dUArr_mJy=None, freqHirArr_Hz=None, QmodArr=None,
UmodArr=None, axisYright=False, axisXtop=False):
"""Plot the P, Q & U spectrum and an optional model. """
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Default to non-high-resolution inputs
if freqHirArr_Hz is None:
freqHirArr_Hz = freqArr_Hz
# Calculate P and errors
PArr_mJy = np.sqrt(np.power(QArr_mJy,2) + np.power(UArr_mJy,2))
if dQArr_mJy is None or dUArr_mJy is None:
dPArr_mJy = None
else:
dPArr_mJy = np.sqrt(np.power(dQArr_mJy,2) + np.power(dUArr_mJy,2))
# Plot P, Q, U versus frequency
ax.errorbar(x=freqArr_Hz/1e9, y=QArr_mJy, yerr=dQArr_mJy, mec='b',
mfc='none', ms=4, fmt='D', color='g', elinewidth=1.0,
capsize=2, label='Stokes Q')
ax.errorbar(x=freqArr_Hz/1e9, y=UArr_mJy, yerr=dUArr_mJy, mec='r',
mfc='none', ms=4, fmt='D', color='r', elinewidth=1.0,
capsize=2, label='Stokes U')
ax.errorbar(x=freqArr_Hz/1e9, y=PArr_mJy, yerr=dPArr_mJy, mec='k',
mfc='none', ms=4, fmt='D', color='k', elinewidth=1.0,
capsize=2, label='Intensity P')
# Plot the models
if QmodArr is not None:
ax.plot(freqHirArr_Hz/1e9, QmodArr, color='b', lw=0.5, label='Model Q')
if UmodArr is not None:
ax.plot(freqHirArr_Hz/1e9, UmodArr, color='r', lw=0.5, label='Model U')
if QmodArr is not None and UmodArr is not None:
PmodArr = np.sqrt(QmodArr**2.0 + UmodArr**2.0 )
ax.plot(freqHirArr_Hz/1e9, PmodArr, color='k', lw=0.5, label='Model P')
# Formatting
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('Flux Density (mJy)')
ax.axhline(0, color='grey')
# Format tweaks
ax = tweakAxFormat(ax)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------#
def plot_rmsIQU_vs_nu_ax(ax, freqArr_Hz, rmsIArr_mJy, rmsQArr_mJy,
rmsUArr_mJy, axisYright=False, axisXtop=False):
"""Plot the noise spectra in Stokes I, Q & U. """
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Plot the rms spectra in GHz and mJy
ax.plot(freqArr_Hz/1e9, rmsIArr_mJy, marker='o', color='k', lw='0.5',
label='rms I')
ax.plot(freqArr_Hz/1e9, rmsQArr_mJy, marker='o', color='b', lw='0.5',
label='rms Q')
ax.plot(freqArr_Hz/1e9, rmsUArr_mJy, marker='o', color='r', lw='0.5',
label='rms U')
#ax.text(0.05, 0.94, 'I, Q & U RMS', transform=ax.transAxes)
# Formatting
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('Flux Density (mJy bm$^{-1}$)')
# Format tweaks
ax = tweakAxFormat(ax)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------#
def plot_pqu_vs_lamsq_ax(ax, lamSqArr_m2, qArr, uArr, dqArr=None, duArr=None,
lamSqHirArr_m2=None, qModArr=None, uModArr=None,
axisYright=False, axisXtop=False):
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Default to non-high-resolution inputs
if lamSqHirArr_m2 is None:
lamSqHirArr_m2 = lamSqArr_m2
# Calculate p and errors
pArr = np.sqrt(qArr**2.0 + uArr**2.0 )
if dqArr is None or duArr is None:
dpArr = None
else:
dpArr = np.sqrt(dqArr**2.0 + duArr**2.0 )
# Plot p, q, u versus lambda^2
ax.errorbar(x=lamSqArr_m2, y=qArr, yerr=dqArr, mec='b', mfc='none', ms=4,
fmt='D', ecolor='b', elinewidth=1.0, capsize=2,
label='Stokes q')
ax.errorbar(x=lamSqArr_m2, y=uArr, yerr=duArr, mec='r', mfc='none', ms=4,
fmt='D', ecolor='r', elinewidth=1.0, capsize=2,
label='Stokes u')
ax.errorbar(x=lamSqArr_m2, y=pArr, yerr=dpArr, mec='k', mfc='none', ms=4,
fmt='D', ecolor='k', elinewidth=1.0, capsize=2,
label='Intensity p')
# Plot the models
if qModArr is not None:
ax.plot(lamSqHirArr_m2, qModArr, color='b', lw=0.5, label='Model q')
if uModArr is not None:
ax.plot(lamSqHirArr_m2, uModArr, color='r', lw=0.5, label='Model u')
if qModArr is not None and uModArr is not None:
pModArr = np.sqrt(qModArr**2.0 + uModArr**2.0 )
ax.plot(lamSqHirArr_m2, pModArr, color='k', lw=0.5, label='Model p')
# Formatting
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = np.nanmax(lamSqArr_m2)-np.nanmin(lamSqArr_m2)
ax.set_xlim( np.min(lamSqArr_m2) - xRange*0.05,
np.max(lamSqArr_m2) + xRange*0.05)
ax.set_xlabel('$\\lambda^2$ (m$^2$)')
ax.set_ylabel('Fractional Polarisation')
ax.axhline(0, color='grey')
# Format tweaks
ax = tweakAxFormat(ax)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------#
def plot_psi_vs_lamsq_ax(ax, lamSqArr_m2, qArr, uArr, dqArr=None, duArr=None,
lamSqHirArr_m2=None, qModArr=None, uModArr=None,
axisYright=False, axisXtop=False):
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Default to non-high-resolution inputs
if lamSqHirArr_m2 is None:
lamSqHirArr_m2 = lamSqArr_m2
# Plot psi versus lambda^2
pArr = np.sqrt(qArr**2.0 + uArr**2.0 )
psiArr_deg = np.degrees( np.arctan2(uArr, qArr) / 2.0 )
if dqArr is None or duArr is None:
dQUArr = None
dPsiArr_deg = None
else:
dQUArr = np.sqrt(dqArr**2.0 + duArr**2.0)
dPsiArr_deg = np.degrees( np.sqrt( (qArr * duArr)**2.0 +
(uArr * dqArr)**2.0) /
(2.0 * pArr**2.0) )
ax.errorbar(x=lamSqArr_m2, y=psiArr_deg, yerr=dPsiArr_deg, mec='k',
mfc='none', ms=4, fmt='D', ecolor='k', elinewidth=1.0,
capsize=2)
if qModArr is not None and uModArr is not None:
psiHirArr_deg = np.degrees( np.arctan2(uModArr, qModArr) / 2.0 )
ax.plot(lamSqHirArr_m2, psiHirArr_deg, color='k', lw=0.5,
label='Model $\psi$')
ax.set_ylim(-99.9, 99.9)
ax.axhline(0, color='grey')
# Formatting
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = np.nanmax(lamSqArr_m2)-np.nanmin(lamSqArr_m2)
ax.set_xlim( np.min(lamSqArr_m2) - xRange*0.05,
np.max(lamSqArr_m2) + xRange*0.05)
ax.set_xlabel('$\\lambda^2$ (m$^2$)')
ax.set_ylabel('$\psi$ (degrees)')
# Format tweaks
ax = tweakAxFormat(ax, showLeg=False)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------#
def plot_q_vs_u_ax(ax, lamSqArr_m2, qArr, uArr, dqArr=None, duArr=None,
lamSqHirArr_m2=None, qModArr=None, uModArr=None,
axisYright=False, axisXtop=False):
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Default to non-high-resolution inputs
if lamSqHirArr_m2 is None:
lamSqHirArr_m2 = lamSqArr_m2
# Plot U versus Q
ax.errorbar(x=uArr, y=qArr, xerr=duArr, yerr=dqArr, mec='k', mfc='none',
ms=4, fmt='D', ecolor='k', elinewidth=1.0, capsize=2)
if qModArr is not None and uModArr is not None:
ax.plot(uModArr, qModArr, color='k', lw=0.5, label='Model q & u')
ax.axhline(0, color='grey')
ax.axvline(0, color='grey')
# Formatting
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
ax.set_xlabel('Stokes u')
ax.set_ylabel('Stokes q')
format_ticks(ax, 10, 1.2)
ax.relim()
ax.autoscale_view(False,True,True)
#-----------------------------------------------------------------------------#
def plot_RMSF_ax(ax, phiArr, RMSFArr, fwhmRMSF=None, axisYright=False,
axisXtop=False):
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Plot the RMSF
ax.step(phiArr, RMSFArr.real, where='mid', color='b', lw='0.5',
label='Real')
ax.step(phiArr, RMSFArr.imag, where='mid', color='r', lw='0.5',
label='Imaginary')
ax.step(phiArr, np.abs(RMSFArr) , where='mid', color='k', lw='1.0',
label='PI')
#ax.text(0.05, 0.94, 'RMSF', transform=ax.transAxes)
# Plot the Gaussian fit
if fwhmRMSF is not None:
yGauss = gauss([1.0, 0.0, fwhmRMSF])(phiArr)
ax.plot(phiArr, yGauss, color='magenta',marker='None',mfc='w',
mec='g', ms=10, label='Gaussian Fit', lw=2.0, ls='--')
# Scaling
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = np.nanmax(phiArr)-np.nanmin(phiArr)
ax.set_xlim( np.min(phiArr) - xRange*0.01,
np.max(phiArr) + xRange*0.01)
ax.set_ylabel('Normalised Units')
ax.set_xlabel('$\phi$ rad m$^{-2}$')
ax.axhline(0, color='grey')
# Format tweaks
ax = tweakAxFormat(ax)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------
def gauss(p):
"""Return a fucntion to evaluate a Gaussian with parameters
p = [amp, mean, FWHM]"""
a, b, w = p
gfactor = 2.0 * m.sqrt(2.0 * m.log(2.0))
s = w / gfactor
def rfunc(x):
y = a * np.exp(-(x-b)**2.0 /(2.0 * s**2.0))
return y
return rfunc
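# For example, gauss([1.0, 0.0, 2.0]) returns a callable for a unit-amplitude
# Gaussian of FWHM 2 centred on zero, which evaluates to half-maximum at
# x = +/-1 (up to rounding):
#   g = gauss([1.0, 0.0, 2.0])
#   g(np.array([-1.0, 0.0, 1.0]))   # -> array([ 0.5,  1. ,  0.5])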
#-----------------------------------------------------------------------------#
def plot_dirtyFDF_ax(ax, phiArr, FDFArr_mJy, gaussParm=[], title="Dirty FDF",
axisYright=False, axisXtop=False):
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Plot the FDF
FDFpiArr_mJy = np.sqrt( np.power(FDFArr_mJy.real, 2.0) +
np.power(FDFArr_mJy.imag, 2.0) )
ax.step(phiArr, FDFArr_mJy.real, where='mid', color='b', lw='0.5',
label='Real')
ax.step(phiArr, FDFArr_mJy.imag, where='mid', color='r', lw='0.5',
label='Imaginary')
ax.step(phiArr, FDFpiArr_mJy, where='mid', color='k', lw='1.0',
label='PI')
#ax.text(0.05, 0.94, title, transform=ax.transAxes)
# Plot the Gaussian peak
if len(gaussParm)==3:
# [amp, mean, FWHM]
phiTrunkArr = np.where(phiArr>=gaussParm[1]-gaussParm[2]/3.0,
phiArr, np.nan)
phiTrunkArr = np.where(phiArr<=gaussParm[1]+gaussParm[2]/3.0,
phiTrunkArr, np.nan)
yGauss = gauss(gaussParm)(phiTrunkArr)
ax.plot(phiArr, yGauss, color='magenta',marker='None',mfc='w',
mec='g', ms=10, label='Peak Fit', lw=2.5, ls='-')
# Scaling
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = np.nanmax(phiArr)-np.nanmin(phiArr)
ax.set_xlim( np.min(phiArr) - xRange*0.01,
np.max(phiArr) + xRange*0.01)
ax.set_ylabel('Flux Density (mJy)')
ax.set_xlabel('$\phi$ rad m$^{-2}$')
ax.axhline(0, color='grey')
# Format tweaks
ax = tweakAxFormat(ax)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------#
def plot_cleanFDF_ax(ax, phiArr, cleanFDFArr_mJy, ccFDFArr_mJy=None,
dirtyFDFArr_mJy=None, gaussParm=[], title="Clean FDF",
cutoff_mJy=None, axisYright=False, axisXtop=False):
# Set the axis positions
if axisYright:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
if axisXtop:
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
# Plot the dirty FDF in the background
if not dirtyFDFArr_mJy is None:
dirtyFDFpiArr_mJy = np.sqrt( np.power(dirtyFDFArr_mJy.real, 2.0) +
np.power(dirtyFDFArr_mJy.imag, 2.0) )
ax.step(phiArr, dirtyFDFpiArr_mJy, where='mid', color='grey',
lw='0.5', label='Dirty')
# Plot the clean FDF
cleanFDFpiArr_mJy = np.sqrt( np.power(cleanFDFArr_mJy.real, 2.0) +
np.power(cleanFDFArr_mJy.imag, 2.0) )
ax.step(phiArr, cleanFDFArr_mJy.real, where='mid', color='b', lw='0.5',
label='Real')
ax.step(phiArr, cleanFDFArr_mJy.imag, where='mid', color='r', lw='0.5',
label='Imaginary')
ax.step(phiArr, cleanFDFpiArr_mJy, where='mid', color='k', lw='1.0',
label='PI')
#ax.text(0.05, 0.94, title, transform=ax.transAxes)
# Plot the CC spectrum
if not ccFDFArr_mJy is None:
ax.step(phiArr, ccFDFArr_mJy, where='mid', color='g', lw='0.5',
label='CC')
# Plot the Gaussian peak
if len(gaussParm)==3:
# [amp, mean, FWHM]
phiTrunkArr = np.where(phiArr>=gaussParm[1]-gaussParm[2]/3.0,
phiArr, np.nan)
phiTrunkArr = np.where(phiArr<=gaussParm[1]+gaussParm[2]/3.0,
phiTrunkArr, np.nan)
yGauss = gauss(gaussParm)(phiTrunkArr)
ax.plot(phiArr, yGauss, color='magenta',marker='None',mfc='w',
mec='g', ms=10, label='Peak Fit', lw=2.5, ls='-')
# Plot the clean cutoff line
if not cutoff_mJy is None:
ax.axhline(cutoff_mJy, color="r", ls='--')
# Scaling
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.xaxis.set_major_locator(MaxNLocator(4))
xRange = np.nanmax(phiArr)-np.nanmin(phiArr)
ax.set_xlim( np.min(phiArr) - xRange*0.01,
np.max(phiArr) + xRange*0.01)
ax.set_ylabel('Flux Density (mJy)')
ax.set_xlabel('$\phi$ rad m$^{-2}$')
ax.axhline(0, color='grey')
# Format tweaks
ax = tweakAxFormat(ax)
ax.relim()
ax.autoscale_view()
#-----------------------------------------------------------------------------#
def plot_hist4_ax(ax, popLst, nBins=10, doXlog=False, doYlog=False, styIndx=0,
xMin=None, xMax=None, yMax=None, xLabel="", yLabel="",
title="", legLabLst=[], legLoc="tr", verbose=False):
# Format of the histogram lines and shading.
# Up to four histograms are supported and two alternative styles
edgeColourLst = [['black', 'red', 'blue', 'black'],
['grey','red', 'blue', 'black']]
fillColourLst = [['#dddddd', 'none', 'none', 'none'],
['none', 'none', 'none', 'none']]
hatchStyleLst = [['', '/', '\\', ''],
['/', '\\', '', '']]
histLinewidthLst = [[1.0,1.0,1.0,1.5],
[1.0,1.0,1.0,1.5]]
# Translate the legend location code
if legLoc not in ["tl", "tr", "bl", "br"]: legLoc = "tr"
locTab = {"tl": "upper left", "tr": "upper right",
"bl": "lower left", "br": "lower right"}
legLoc = locTab[legLoc]
# TODO: Remove extra columns in the recarrays
# Determine the max and min of the ensemble population
popEnsemble = np.concatenate(popLst).astype(np.float)
xMinData = float(np.nanmin(popEnsemble))
xMaxData = float(np.nanmax(popEnsemble))
# All valid data must have the same sign for log plots
if doXlog:
if not (xMinData<0) == (xMaxData<0):
print "\nErr: for log axis all data must have the same sign!"
return
sign = np.sign(popEnsemble)[0]
# Calculate the bin edges
if doXlog:
logBins = np.linspace(m.log10(abs(xMinData)), m.log10(abs(xMaxData)),
int(nBins+1))
b = np.power(10.0, logBins) * sign
else:
b = np.linspace(xMinData, xMaxData, int(nBins+1))
# Bin the data in each population
nLst = []
for p in popLst:
n, b = np.histogram(p.astype(np.float), bins = b)
n = np.array(n, dtype=np.float)
nLst.append(n)
# Print the binned values to the screen
if verbose:
print "\n#BIN, COUNTS ..."
binCentreArr = b[:-1]+np.diff(b)/2.0
for i in range(len(binCentreArr)):
print binCentreArr[i],
for j in range(len(nLst)):
print nLst[j][i],
print
# Set the Y-axis limits
nEnsemble = np.concatenate(nLst)
if doYlog:
yZeroPt = 0.8
yMin = yZeroPt
if yMax is None:
yMaxData = float(max(nEnsemble))
yFac = abs(yMaxData/yZeroPt)
yMax = yMaxData*(1+ m.log10(yFac)*0.3)
else:
yZeroPt = 0.0
yMin = yZeroPt
if yMax is None:
yMax = float(max(nEnsemble))*1.2
# Set the X-axis limits, incorporating a single padding bin
xFac = (len(b)-1)*0.05
if doXlog:
sign = np.sign(b)[0]
logBins = np.log10(b*sign)
logBinWidth = np.max(np.diff(logBins))
if xMin is None:
xMin = 10**(logBins[0] - logBinWidth*xFac)*sign
if xMax is None:
xMax = 10**(logBins[-1] + logBinWidth*xFac)*sign
else:
linBinWidth = np.max(np.diff(b))
if xMin is None:
xMin = b[0] - linBinWidth*xFac
if xMax is None:
xMax = b[-1] + linBinWidth*xFac
# Set the axis formatter for log scale axes
if doXlog:
ax.set_xscale('symlog')
majorFormatterX = FuncFormatter(label_format_exp(5.0))
ax.xaxis.set_major_formatter(majorFormatterX)
if doYlog:
ax.set_yscale('symlog')
majorFormatterY = FuncFormatter(label_format_exp(3.0))
ax.yaxis.set_major_formatter(majorFormatterY)
# Create individual histogram polygons. Manually creating histograms gives
# more control than inbuilt matplotlib function - when originally writing
# this code the fill styles did not work well.
for i in range(len(nLst)):
# Set the legend labels
try:
legLabel = legLabLst[i]
if legLabLst[i]=="":
raise Exception
except Exception:
legLabel = "Query %s" % (i+1)
# Create the histograms from line-segments
polyCoords = mk_hist_poly(b, nLst[i], doYlog, zeroPt=0.7)
hist = Polygon(polyCoords, closed=True, animated=False, linewidth=2.7,
label=legLabel)
hist.set_linewidth(histLinewidthLst[styIndx][i])
hist.set_edgecolor(edgeColourLst[styIndx][i])
hist.set_facecolor(fillColourLst[styIndx][i])
hist.set_hatch(hatchStyleLst[styIndx][i])
ax.add_patch(hist)
# Set the X axis limits
ax.set_xlim(xMin, xMax)
ax.set_ylim(yMin, yMax)
# Draw the labels on the plot
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.set_title(title, size=14)
# Format tweaks
tweakAxFormat(ax, showLeg=True, loc=legLoc)
#-----------------------------------------------------------------------------#
def plot_scatter4_ax(ax, popLst, doXlog=False, doYlog=False, zPower=1.0,
styIndx=0, xMin=None, xMax=None, yMin=None,
yMax=None, zMin=None, zMax=None, xLabel="", yLabel="",
zLabel="", title="", legLabLst=[], showCbar=False,
show11Line=False, legLoc="tr", verbose=False):
# Format of the scatter points and shading.
# Up to four populations are supported and two alternative styles
edgeColourLst = [['black', 'black', 'black', 'black'],
['black','red', 'green', 'blue']]
fillColourLst = [['black', 'red', 'green', 'blue'],
['none', 'none', 'none', 'none']]
symbolLst = [['o', 's', 'd', '^'],
['o', '+', 's', 'd']]
symbolSize = [[45, 30, 30, 30],
[45, 80, 30, 30]]
# Translate the legend location code
if legLoc not in ["tl", "tr", "bl", "br"]: legLoc = "tr"
locTab = {"tl": "upper left", "tr": "upper right",
"bl": "lower left", "br": "lower right"}
legLoc = locTab[legLoc]
# Separate out the X Y and Z data
xLst = []
yLst = []
zLst = []
for i in range(len(popLst)):
colNames = popLst[i].dtype.names
nCols = len(colNames)
xLst.append(popLst[i][colNames[0]])
yLst.append(popLst[i][colNames[1]])
if nCols>2:
            zLst.append(popLst[i][colNames[2]])
# Determine the max and min of the ensemble population
xEnsemble = np.concatenate(xLst).astype(np.float)
signX = np.sign(xEnsemble)[0]
xMinData = float(np.nanmin(xEnsemble))
xMaxData = float(np.nanmax(xEnsemble))
yEnsemble = np.concatenate(yLst).astype(np.float)
signY = np.sign(yEnsemble)[0]
yMinData = float(np.nanmin(yEnsemble))
yMaxData = float(np.nanmax(yEnsemble))
if not zLst==[]:
zEnsemble = np.concatenate(zLst).astype(np.float)
signZ = np.sign(zEnsemble)[0]
zMinData = float(np.nanmin(zEnsemble))
zMaxData = float(np.nanmax(zEnsemble))
# All valid data must have the same sign for log plots
if doXlog:
if not (xMinData<0) == (xMaxData<0):
print "\nErr: for log X-axis all data must have the same sign!"
sys.exit()
if doYlog:
if not (yMinData<0) == (yMaxData<0):
print "\nErr: for log Y-axis all data must have the same sign!"
sys.exit()
    if not zLst==[] and zPower!=1.0:
if not (zMinData<0) == (zMaxData<0):
print "\nErr: for log Z-axis all data must have the same sign!"
sys.exit()
# Set the plotting ranges (& colour limits)
if doXlog:
xFac = abs(xMaxData/xMinData)
if xMin is None:
xMin = xMinData/(1+ m.log10(xFac)*0.1)
if xMax is None:
xMax = xMaxData*(1+ m.log10(xFac)*0.1)
else:
xPad = abs(xMaxData - xMinData) * 0.04
if xMin is None:
xMin = xMinData - xPad
if xMax is None:
xMax = xMaxData + xPad
if doYlog:
yFac = abs(yMaxData/yMinData)
if yMin is None:
yMin = yMinData/(1+ m.log10(yFac)*0.1)
if yMax is None:
yMax = yMaxData*(1+ m.log10(yFac)*0.1)
else:
yPad = abs(yMaxData - yMinData) * 0.05
if yMin is None:
yMin = yMinData - yPad
if yMax is None:
yMax = yMaxData + yPad
# Set the z-colour range
if not zLst==[]:
if not np.all(np.isnan(zEnsemble)):
if zMin is None:
zMin = zMinData
if zMax is None:
zMax = zMaxData
# Set the axis formatter for log scale axes
if doXlog:
ax.set_xscale('log')
majorFormatterX = FuncFormatter(label_format_exp(5.0))
ax.xaxis.set_major_formatter(majorFormatterX)
if doYlog:
ax.set_yscale('log')
majorFormatterY = FuncFormatter(label_format_exp(3.0))
ax.yaxis.set_major_formatter(majorFormatterY)
norm = APLpyNormalize(stretch='power', exponent=zPower,
vmin=zMin, vmax=zMax)
# Plot each set of points in turn
sc3D = None
zMap = 'r'
for i in range(len(xLst)):
# Map the z axis to the colours
if not zLst==[]:
if np.all(np.isnan(zLst[i])):
zMap = fillColourLst[styIndx][i]
else:
zMap = zLst[i]
# Set the legend labels
try:
legLabel = legLabLst[i]
if legLabLst[i]=="":
raise Exception
except Exception:
legLabel = "Query %s" % (i+1)
# Add the points to the plot
sc = ax.scatter(xLst[i], yLst[i],
marker = symbolLst[styIndx][i],
s = symbolSize[styIndx][i],
c = zMap,
norm = norm,
vmin = zMin,
vmax = zMax,
linewidths = 0.5,
edgecolors=edgeColourLst[styIndx][i],
label=legLabel)
if not zLst==[]:
if not np.all(np.isnan(zLst[i])):
sc3D = sc
# Set the X axis limits
ax.set_xlim(xMin, xMax)
ax.set_ylim(yMin, yMax)
# Draw the labels on the plot
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.set_title(title)
# Format tweaks
tweakAxFormat(ax, showLeg=True, loc=legLoc)
# Axis code above
#=============================================================================#
# Figure code below
#-----------------------------------------------------------------------------#
def plotSpecIPQU(dataMan, indx, io='fig'):
# Get the data
freqArr_Hz, IArr_Jy, rmsIArr_Jy= dataMan.get_specI_byindx(indx)
dummy, QArr_Jy, rmsQArr_Jy= dataMan.get_specQ_byindx(indx)
dummy, UArr_Jy, rmsUArr_Jy= dataMan.get_specU_byindx(indx)
# Get the models to overplot
freqHirArr_Hz, qModArr, uModArr = dataMan.get_thin_qumodel_byindx(indx,
oversample=True)
freqHirArr_Hz, IModArr_mJy = dataMan.get_modI_byindx(indx, oversample=True)
QmodArr = qModArr * IModArr_mJy
UmodArr = uModArr * IModArr_mJy
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot I versus nu,
ax1 = fig.add_subplot(211)
plot_I_vs_nu_ax(ax=ax1,
freqArr_Hz = freqArr_Hz,
IArr_mJy = IArr_Jy*1e3,
dIArr_mJy = rmsIArr_Jy*1e3,
freqHirArr_Hz = freqHirArr_Hz,
IModArr_mJy = IModArr_mJy*1e3)
ax1.set_xlabel('')
[label.set_visible(False) for label in ax1.get_xticklabels()]
# Plot Stokes P, Q & U
ax2 = fig.add_subplot(212, sharex=ax1)
plot_PQU_vs_nu_ax(ax=ax2,
freqArr_Hz = freqArr_Hz,
QArr_mJy = QArr_Jy*1e3,
UArr_mJy = UArr_Jy*1e3,
dQArr_mJy = rmsQArr_Jy*1e3,
dUArr_mJy = rmsUArr_Jy*1e3,
freqHirArr_Hz=freqHirArr_Hz,
QmodArr=QmodArr*1e3,
UmodArr=UmodArr*1e3)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotSpecRMS(dataMan, indx, io='fig'):
# Get the data
freqArr_Hz, IArr_Jy, rmsIArr_Jy= dataMan.get_specI_byindx(indx)
dummy, QArr_Jy, rmsQArr_Jy= dataMan.get_specQ_byindx(indx)
dummy, UArr_Jy, rmsUArr_Jy= dataMan.get_specU_byindx(indx)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot Stokes I, Q & U
ax1 = fig.add_subplot(111)
plot_rmsIQU_vs_nu_ax(ax=ax1,
freqArr_Hz = freqArr_Hz,
rmsIArr_mJy = rmsIArr_Jy*1e3,
rmsQArr_mJy = rmsQArr_Jy*1e3,
rmsUArr_mJy = rmsUArr_Jy*1e3)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotPolang(dataMan, indx, io='fig'):
# Get the data
freqArr_Hz, IArr_Jy, rmsIArr_Jy= dataMan.get_specI_byindx(indx)
dummy, modIArr_Jy = dataMan.get_modI_byindx(indx)
dummy, QArr_Jy, rmsQArr_Jy= dataMan.get_specQ_byindx(indx)
dummy, UArr_Jy, rmsUArr_Jy= dataMan.get_specU_byindx(indx)
# Calculate fractional polarisation spectra
qArr = QArr_Jy / modIArr_Jy
uArr = UArr_Jy / modIArr_Jy
dqArr = qArr * np.sqrt( (rmsQArr_Jy/QArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
duArr = uArr * np.sqrt( (rmsUArr_Jy/UArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
lamSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
# Get the models to overplot
freqHirArr_Hz, qModArr, uModArr = dataMan.get_thin_qumodel_byindx(indx,
oversample=True)
lamSqHirArr_m2 = np.power(C/freqHirArr_Hz, 2.0)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot psi versus lambda^2
ax1 = fig.add_subplot(111)
plot_psi_vs_lamsq_ax(ax=ax1,
lamSqArr_m2 = lamSqArr_m2,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
lamSqHirArr_m2 = lamSqHirArr_m2,
qModArr = qModArr,
uModArr = uModArr,
axisYright = False)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotFracPol(dataMan, indx, io='fig'):
# Get the data
freqArr_Hz, IArr_Jy, rmsIArr_Jy= dataMan.get_specI_byindx(indx)
dummy, modIArr_Jy = dataMan.get_modI_byindx(indx)
dummy, QArr_Jy, rmsQArr_Jy= dataMan.get_specQ_byindx(indx)
dummy, UArr_Jy, rmsUArr_Jy= dataMan.get_specU_byindx(indx)
# Calculate fractional polarisation spectra
qArr = QArr_Jy / modIArr_Jy
uArr = UArr_Jy / modIArr_Jy
dqArr = qArr * np.sqrt( (rmsQArr_Jy/QArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
duArr = uArr * np.sqrt( (rmsUArr_Jy/UArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
lamSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
# Get the models to overplot
freqHirArr_Hz, qModArr, uModArr = dataMan.get_thin_qumodel_byindx(indx,
oversample=True)
lamSqHirArr_m2 = np.power(C/freqHirArr_Hz, 2.0)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot p, q, u versus lambda^2
ax2 = fig.add_subplot(111)
plot_pqu_vs_lamsq_ax(ax=ax2,
lamSqArr_m2 = lamSqArr_m2,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
lamSqHirArr_m2 = lamSqHirArr_m2,
qModArr = qModArr,
uModArr = uModArr)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotFracQvsU(dataMan, indx, io='fig'):
# Get the data
freqArr_Hz, IArr_Jy, rmsIArr_Jy= dataMan.get_specI_byindx(indx)
dummy, modIArr_Jy = dataMan.get_modI_byindx(indx)
dummy, QArr_Jy, rmsQArr_Jy= dataMan.get_specQ_byindx(indx)
dummy, UArr_Jy, rmsUArr_Jy= dataMan.get_specU_byindx(indx)
# Calculate fractional polarisation spectra
qArr = QArr_Jy / modIArr_Jy
uArr = UArr_Jy / modIArr_Jy
dqArr = qArr * np.sqrt( (rmsQArr_Jy/QArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
duArr = uArr * np.sqrt( (rmsUArr_Jy/UArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
lamSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
# Get the models to overplot
freqHirArr_Hz, qModArr, uModArr = dataMan.get_thin_qumodel_byindx(indx,
oversample=True)
lamSqHirArr_m2 = np.power(C/freqHirArr_Hz, 2.0)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot U versus Q
ax1 = fig.add_subplot(111)
plot_q_vs_u_ax(ax=ax1,
lamSqArr_m2 = lamSqArr_m2,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
lamSqHirArr_m2 = lamSqHirArr_m2,
qModArr = qModArr,
uModArr = uModArr,
axisYright = False)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotPolsummary(dataMan, indx, io='fig'):
# Get the data
freqArr_Hz, IArr_Jy, rmsIArr_Jy= dataMan.get_specI_byindx(indx)
dummy, modIArr_Jy = dataMan.get_modI_byindx(indx)
dummy, QArr_Jy, rmsQArr_Jy= dataMan.get_specQ_byindx(indx)
dummy, UArr_Jy, rmsUArr_Jy= dataMan.get_specU_byindx(indx)
# Calculate fractional polarisation spectra
qArr = QArr_Jy / modIArr_Jy
uArr = UArr_Jy / modIArr_Jy
dqArr = qArr * np.sqrt( (rmsQArr_Jy/QArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
duArr = uArr * np.sqrt( (rmsUArr_Jy/UArr_Jy)**2.0 +
(rmsIArr_Jy/IArr_Jy)**2.0 )
lamSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
# Get the models to overplot
freqHirArr_Hz, qModArr, uModArr = dataMan.get_thin_qumodel_byindx(indx,
oversample=True)
lamSqHirArr_m2 = np.power(C/freqHirArr_Hz, 2.0)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot I versus nu,
ax1 = fig.add_subplot(221)
plot_I_vs_nu_ax(ax=ax1,
freqArr_Hz = freqArr_Hz,
IArr_mJy = IArr_Jy*1e3,
dIArr_mJy = rmsIArr_Jy*1e3,
IModArr_mJy = modIArr_Jy*1e3,
axisXtop=True)
# Plot p, q, u versus lambda^2
ax2 = fig.add_subplot(223)
plot_pqu_vs_lamsq_ax(ax=ax2,
lamSqArr_m2 = lamSqArr_m2,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
lamSqHirArr_m2 = lamSqHirArr_m2,
qModArr = qModArr,
uModArr = uModArr)
# Plot psi versus lambda^2
ax3 = fig.add_subplot(222)
plot_psi_vs_lamsq_ax(ax=ax3,
lamSqArr_m2 = lamSqArr_m2,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
lamSqHirArr_m2 = lamSqHirArr_m2,
qModArr = qModArr,
uModArr = uModArr,
axisYright=True,
axisXtop=True)
# Plot U versus Q
ax4 = fig.add_subplot(224)
plot_q_vs_u_ax(ax=ax4,
lamSqArr_m2 = lamSqArr_m2,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
lamSqHirArr_m2 = lamSqHirArr_m2,
qModArr = qModArr,
uModArr = uModArr,
axisYright = True)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotRMSF(dataMan, indx, io='fig'):
# Get the data and Gaussian fit to RMSF
phiArr, RMSFArr = dataMan.get_RMSF_byindx(indx)
pDict = dataMan.get_RMSF_params_byindx(indx)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot the RMSF
ax1 = fig.add_subplot(111)
plot_RMSF_ax(ax=ax1,
phiArr = phiArr,
RMSFArr = RMSFArr,
fwhmRMSF=pDict["fwhmRMSF"])
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotDirtyFDF(dataMan, indx, io='fig'):
# Get the data
phiArr, FDFArr_Jy = dataMan.get_dirtyFDF_byindx(indx)
# Get the peak results
pDict = dataMan.get_FDF_peak_params_byindx(indx)
pDict1 = dataMan.get_RMSF_params_byindx(indx)
gaussParm=[pDict["ampPeakPIfit_Jybm"]*1e3,
pDict["phiPeakPIfit_rm2"],
pDict1["fwhmRMSF"]]
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot the FDF
ax1 = fig.add_subplot(111)
plot_dirtyFDF_ax(ax=ax1,
phiArr = phiArr,
FDFArr_mJy = FDFArr_Jy*1e3,
gaussParm = gaussParm,
title="Dirty Faraday Dispersion Function")
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotCleanFDF(dataMan, indx, io='fig'):
# Get the data
phiArr, dirtyFDFArr_Jy = dataMan.get_dirtyFDF_byindx(indx)
dummy, cleanFDFArr_Jy = dataMan.get_cleanFDF_byindx(indx)
dummy, ccFDF_Jy = dataMan.get_ccFDF_byindx(indx)
# Get the peak results
pDict = dataMan.get_FDF_peak_params_byindx(indx, doClean=True)
pDict1 = dataMan.get_RMSF_params_byindx(indx)
gaussParm=[pDict["ampPeakPIfit_Jybm"]*1e3,
pDict["phiPeakPIfit_rm2"],
pDict1["fwhmRMSF"]]
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
# Plot the clean FDF
ax1 = fig.add_subplot(111)
plot_cleanFDF_ax(ax=ax1,
phiArr = phiArr,
cleanFDFArr_mJy = cleanFDFArr_Jy*1e3,
ccFDFArr_mJy = ccFDF_Jy*1e3,
dirtyFDFArr_mJy = dirtyFDFArr_Jy*1e3,
gaussParm = gaussParm,
title = "Clean Faraday Dispersion Function",
cutoff_mJy = pDict["cleanCutoff_Jybm"]*1e3)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotStampI(dataMan, indx, io='fig'):
# Get the data & header of the saved postage stamp
data, head = dataMan.get_stampI_byindx(indx)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
fig = plot_fits_map(data, head, fig=fig)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotStampP(dataMan, indx, io='fig'):
# Get the data & header of the saved postage stamp
data, head = dataMan.get_stampP_byindx(indx)
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
fig = plot_fits_map(data, head, fig=fig)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def plotSctHstQuery(dataMan, plotParm, io='fig'):
# What type of plot are we creating?
plotType = plotParm.configDict.get("TYPE", "Histogram")
# Execute each query in turn and store results in list of recarrays
popLst = []
names = []
nCols = 0
for i in range(len(plotParm.queryLst)-1,-1,-1):
sql = plotParm.queryLst[i]
try:
resultArr = dataMan.query_database(sql)
colNames = resultArr.dtype.names
nCols = max(len(colNames), nCols)
popLst.append(resultArr)
except Exception:
popLst.append(None)
print "\nWarn: failed to execute query:"
print "'%s'\n" % sql
print traceback.format_exc(), "\n"
popLst.reverse()
popLst = popLst[:4]
# Filter data for limits given in the driving file (default None)
xMinDataCmd = plotParm.configDict.get("XDATAMIN", None)
xMaxDataCmd = plotParm.configDict.get("XDATAMAX", None)
xMinData = xfloat(xMinDataCmd, None)
xMaxData = xfloat(xMaxDataCmd, None)
yMinDataCmd = plotParm.configDict.get("YDATAMIN", None)
yMaxDataCmd = plotParm.configDict.get("YDATAMAX", None)
yMinData = xfloat(yMinDataCmd, None)
yMaxData = xfloat(yMaxDataCmd, None)
zMinDataCmd = plotParm.configDict.get("ZDATAMIN", None)
zMaxDataCmd = plotParm.configDict.get("ZDATAMAX", None)
zMinData = xfloat(zMinDataCmd, None)
zMaxData = xfloat(zMaxDataCmd, None)
for i in range(len(popLst)):
msk = filter_range_indx(popLst[i][colNames[0]], xMinData, xMaxData)
if plotType=="Scatter" and nCols>1:
msk += filter_range_indx(popLst[i][colNames[1]], yMinData, yMaxData)
if plotType=="Scatter" and nCols>2:
msk += filter_range_indx(popLst[i][colNames[2]], zMinData, zMaxData)
popLst[i] = popLst[i][~msk]
# Labels from driving file (default column name in DB)
if plotParm.configDict["XLABEL"]=="":
plotParm.configDict["XLABEL"] = colNames[0]
xLabel = plotParm.configDict.get("XLABEL", colNames[0])
if plotParm.configDict["YLABEL"]=="":
if plotType=="Scatter":
plotParm.configDict["YLABEL"] = colNames[1]
else:
plotParm.configDict["YLABEL"] = "Count"
yLabel = plotParm.configDict.get("YLABEL", "Count")
if plotParm.configDict["ZLABEL"]=="":
if plotType=="Scatter":
plotParm.configDict["ZLABEL"] = colNames[2]
zLabel = plotParm.configDict.get("ZLABEL", "")
plotTitle = plotParm.configDict.get("TITLE", "")
# Other driving parameters
nBins = xint(plotParm.configDict.get("NBINS", 10))
doXlog = xint(plotParm.configDict.get("DOLOGX", 0))
doYlog = xint(plotParm.configDict.get("DOLOGY", 0))
zPower = xfloat(plotParm.configDict.get("ZPOWER", 1.0))
# Setup the figure
fig = Figure()
fig.set_size_inches([8,8])
ax = fig.add_subplot(111)
# Bin the data and create the histogram
if plotType=="Histogram":
plot_hist4_ax(ax,
popLst = popLst,
nBins = nBins,
doXlog = doXlog,
doYlog = doYlog,
styIndx = 0,
xMin = None,
xMax = None,
yMax = None,
xLabel = xLabel,
yLabel = yLabel,
title = plotTitle,
legLabLst = plotParm.queryLabLst)
if plotType=="Scatter":
plot_scatter4_ax(ax,
popLst = popLst,
doXlog = doXlog,
doYlog = doYlog,
zPower = zPower,
styIndx = 0,
xMin = None,
xMax = None,
yMin = None,
yMax = None,
zMin = None,
zMax = None,
xLabel = xLabel,
yLabel = yLabel,
zLabel = zLabel,
title = plotTitle,
legLabLst = plotParm.queryLabLst,
showCbar = False,
show11Line = False,
legLoc = "tr",
verbose = False)
# Write to the pipe
if io=='string':
sio = StringIO.StringIO()
setattr(sio, "name", "foo.jpg")
fig.savefig(sio, format='jpg' )
return sio
else:
return fig
#-----------------------------------------------------------------------------#
def mk_hist_poly(bins, n, logScaleY=False, zeroPt=0.8, addZeroPt=True):
"""Create the line segments for the a polygon used to draw a histogram"""
if logScaleY is True:
for i in range(len(n)):
if n[i] <= 0.0:
n[i] = zeroPt
else:
zeroPt = 0.0
# Starting position
polyCoordLst = []
if addZeroPt:
polyCoordLst.append([bins[0],zeroPt])
# Form the line segments
i = 0
j = 0
while i <= len(bins)-1:
if j < len(n):
polyCoordLst.append([bins[i],n[j]])
if i == j:
i += 1
else:
j += 1
# Ground the polygon line and close
if addZeroPt:
polyCoordLst.append([bins[-1],zeroPt])
polyCoordLst.append([bins[0],zeroPt])
polyCoords = np.array(polyCoordLst)
return polyCoords
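# For example, mk_hist_poly(bins=[0, 1, 2], n=[3, 5]) traces the outline of a
# two-bin histogram and returns the vertices
#   [[0, 0], [0, 3], [1, 3], [1, 5], [2, 5], [2, 0], [0, 0]]
# where the first and last points ground and close the polygon.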
#-----------------------------------------------------------------------------#
def label_format_exp(switchExp=3.0):
"""Return a function to format labels for log axes. Switches to a power
format for log10(|number|) >= switchExp."""
def rfunc(num, pos=None):
absNum = 0.0
sign = ""
exponent = 0.0
if num!=0.0:
absNum = abs(num)
sign = "-" if int(num/absNum)<0 else ""
exponent = m.log10(absNum)
if abs(exponent)>=switchExp:
return r"$%s10^{%i}$" % (sign, m.log10(absNum))
else:
return r"$%s%g$" % (sign, absNum)
return rfunc
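# For example, with the default switchExp=3.0 the returned formatter renders
# 250 as "$250$" and 12345 as "$10^{4}$" (the exponent is truncated by %i),
# while -12345 becomes "$-10^{4}$".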
|
|
"""
Machine models for atmosphere.
"""
import json
import re
import os
from django.db import models
from django.db.models import Q, Model
from django.core.exceptions import ValidationError
from django.utils import timezone
from core.models.user import AtmosphereUser as User
from core.models.application import create_application, ApplicationThreshold
from core.models.license import License
from core.models.boot_script import BootScript
from core.models.machine import ProviderMachine
from core.models.node import NodeController
from core.models.provider import Provider
from core.models.identity import Identity
from core.models.application_version import ApplicationVersion
from atmosphere.settings import secrets
from threepio import logger
from core.models.abstract import BaseRequest
from core.exceptions import RequestLimitExceeded
from functools import reduce
UNRESOLVED_STATES = ["pending", "processing", "validated", "failed"]
class MachineRequest(BaseRequest):
"""
Storage container for the MachineRequestThread to start/restart the Queue
Provides a Parent-Child relationship between the new image and ancestor(s)
"""
# The instance to image.
instance = models.ForeignKey("Instance")
old_status = models.TextField(default="", null=True, blank=True)
# Machine imaging Metadata
parent_machine = models.ForeignKey(ProviderMachine,
related_name="ancestor_machine")
# Data for the new machine, version and app...
# Application specific:
new_application_name = models.CharField(max_length=256)
new_application_description = models.TextField(
default='Description Missing')
new_application_visibility = models.CharField(
max_length=256, default='private') # Choices:Public, Private, Select
access_list = models.TextField(
default='',
blank=True,
null=True) # DEPRECATED in API v2
# SPECIFIC to 'forked=False'
# Specific to ApplicationVersion && ProviderMachine
system_files = models.TextField(default='', blank=True, null=True)
installed_software = models.TextField(default='', blank=True, null=True)
exclude_files = models.TextField(default='', blank=True, null=True)
new_version_name = models.CharField(max_length=256, default='1.0')
new_version_change_log = models.TextField(default='Changelog Missing')
new_version_tags = models.TextField(
default='', blank=True, null=True) # Re-rename to new_application_tags
new_version_memory_min = models.IntegerField(default=0)
new_version_cpu_min = models.IntegerField(default=0)
new_version_allow_imaging = models.BooleanField(default=True)
new_version_forked = models.BooleanField(default=True)
new_version_licenses = models.ManyToManyField(License, blank=True)
new_version_scripts = models.ManyToManyField(BootScript, blank=True)
new_version_membership = models.ManyToManyField("Group", blank=True)
new_machine_provider = models.ForeignKey(Provider)
new_machine_owner = models.ForeignKey(User, related_name="new_image_owner")
# Filled in when completed.
# NOTE: ProviderMachine and 'new_machine' might be phased out
# along with 'new_machine_provider' as Versions become replicated
# across different clouds.
# However, it might be good to have the "Original machine"..
# similar to the 'created_by/created_by_identity' dilemma
new_machine = models.ForeignKey(ProviderMachine,
null=True, blank=True)
new_application_version = models.ForeignKey(ApplicationVersion,
null=True, blank=True)
def save(self, *args, **kwargs):
if not self.pk and self.is_active(self.instance):
raise RequestLimitExceeded(
"The number of open requests for "
"instance %s has been exceeded."
% self.instance.provider_alias)
Model.save(self, *args, **kwargs)
@classmethod
def is_active(cls, instance):
"""
"""
return cls.objects.filter(instance=instance,
status__name__in=UNRESOLVED_STATES).count() > 0
def clean(self):
"""
Clean up machine requests before saving initial objects to allow
users the chance to correct their mistakes.
"""
# 'Created application' specific logic that should fail:
if self.new_version_forked:
pass
# 'Updated Version' specific logic that should fail:
else:
if self.new_application_name:
raise ValidationError(
"Application name cannot be set unless a new application "
"is being created. Remove the Application name to update "
"-OR- fork the existing application")
# General Validation && AutoCompletion
if self.access_list:
self.new_version_membership = _match_membership_to_access(
self.access_list,
self.new_version_membership)
# Automatically set 'end date' when completed
# TODO: verify this should be 'old_status' or change it to a StatusType
if self.old_status == 'completed' and not self.end_date:
self.end_date = timezone.now()
def new_version_threshold(self):
return {'memory': self.new_version_memory_min,
'cpu': self.new_version_cpu_min}
def get_request_status(self):
return self.status.name
def get_app(self):
if self.new_machine:
return self.new_machine.application
# Return the parent application if the new machine has not been
# created.
return self.parent_machine.application
def get_version(self):
if self.new_machine:
return self.new_machine.application_version
return None
def update_threshold(self):
application_version = self.get_version()
existing_threshold = ApplicationThreshold.objects.filter(
application_version=application_version)
if existing_threshold:
threshold = existing_threshold[0]
else:
threshold = ApplicationThreshold(
application_version=application_version)
threshold.memory_min = self.new_version_memory_min
threshold.cpu_min = self.new_version_cpu_min
threshold.save()
return threshold
def has_threshold(self):
return self.new_version_memory_min > 0\
or self.new_version_cpu_min > 0
def migrate_access_to_membership_list(self, access_list):
for user in access_list:
# 'User' -> User -> Group -> Membership
user_qs = User.objects.filter(username=user)
if not user_qs.exists():
logger.warn("WARNING: User %s does not have a user object" % user)
continue
usergroup_qs = user_qs[0].group_set.filter(name=user)
if not usergroup_qs:
logger.warn("WARNING: User %s does not have a group object" % user)
continue
group = usergroup_qs[0]
self.new_version_membership.add(group)
def _get_meta_name(self):
"""
admin_<username>_<name_under_scored>_<mmddyyyy_hhmmss>
"""
meta_name = '%s_%s_%s_%s' %\
('admin', self.new_machine_owner.username,
self.new_application_name.replace(' ', '_').replace('/', '-'),
self.start_date.strftime('%m%d%Y_%H%M%S'))
return meta_name
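    # For illustration (hypothetical values): a request by owner "jdoe" to
    # image an application named "My App/v2", started on 15 Jan 2016 at noon,
    # would yield the meta name "admin_jdoe_My_App-v2_01152016_120000".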
def fix_metadata(self, im):
if not self.new_machine:
raise Exception(
"New machine missing from machine request. Cannot Fix.")
(orig_managerCls, orig_creds,
dest_managerCls, dest_creds) = self.prepare_manager()
im = dest_managerCls(**dest_creds)
old_mach_id = self.instance.source.identifier
new_mach_id = self.new_machine.identifier
old_mach = im.get_image(old_mach_id)
if not old_mach:
raise Exception("Could not find old machine.. Cannot Fix.")
new_mach = im.get_image(new_mach_id)
        if not new_mach:
raise Exception("Could not find new machine.. Cannot Fix.")
properties = new_mach.properties
previous_kernel = old_mach.properties.get('kernel_id')
previous_ramdisk = old_mach.properties.get('ramdisk_id')
        if not previous_kernel or not previous_ramdisk:
raise Exception(
"Kernel/Ramdisk information MISSING "
"from previous machine. "
"Fix NOT required")
properties.update(
{'kernel_id': previous_kernel, 'ramdisk_id': previous_ramdisk})
im.update_image(new_mach, properties=properties)
def old_provider(self):
return self.instance.source.provider
def new_machine_id(self):
if self.new_machine:
return self.new_machine.identifier
else:
return None
def instance_alias(self):
return self.instance.provider_alias
def is_public(self):
return "public" in self.new_application_visibility.lower()
def get_access_list(self):
if '[' not in self.access_list:
json_loads_list = str(self.access_list.split(", "))
# New Format = "[u'test1', u'test2', u'test3']"
else:
json_loads_list = self.access_list
json_loads_list = json_loads_list.replace("'", '"').replace('u"', '"')
user_list = json.loads(json_loads_list)
return user_list
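    # For example, both the legacy comma-separated form "test1, test2" and the
    # newer repr-style form "[u'test1', u'test2']" normalise to the JSON string
    # '["test1", "test2"]' before json.loads returns the list of usernames.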
def parse_access_list(self):
user_list = re.split(', | |\n', self.access_list)
return user_list
def get_exclude_files(self):
exclude = re.split(", | |\n", self.exclude_files)
return exclude
def old_admin_identity(self):
old_provider = self.parent_machine.provider
old_admin = old_provider.get_admin_identity()
return old_admin
def new_admin_identity(self):
new_provider = self.new_machine_provider
new_admin = new_provider.get_admin_identity()
return new_admin
def active_provider(self):
active_provider = self.new_machine_provider
if not active_provider:
active_provider = self.parent_machine.provider
return active_provider
def get_credentials(self):
old_provider = self.parent_machine.provider
old_creds = old_provider.get_credentials()
old_admin = old_provider.get_admin_identity().get_credentials()
if 'ex_force_auth_version' not in old_creds:
old_creds['ex_force_auth_version'] = '2.0_password'
old_creds.update(old_admin)
new_provider = self.new_machine_provider
if old_provider.id == new_provider.id:
new_creds = old_creds.copy()
else:
new_creds = new_provider.get_credentials()
if 'ex_force_auth_version' not in new_creds:
new_creds['ex_force_auth_version'] = '2.0_password'
new_admin = new_provider.get_admin_identity().get_credentials()
new_creds.update(new_admin)
return (old_creds, new_creds)
def prepare_manager(self):
"""
Prepares, but does not initialize, manager(s)
This allows the manager and required credentials to be passed to celery
without causing serialization errors
"""
from chromogenic.drivers.openstack import \
ImageManager as OSImageManager
from chromogenic.drivers.eucalyptus import \
ImageManager as EucaImageManager
orig_provider = self.parent_machine.provider
dest_provider = self.new_machine_provider
orig_type = orig_provider.get_type_name().lower()
dest_type = dest_provider.get_type_name().lower()
origCls = destCls = None
if orig_type == 'eucalyptus':
origCls = EucaImageManager
elif orig_type == 'openstack':
origCls = OSImageManager
if dest_type == orig_type:
destCls = origCls
elif dest_type == 'eucalyptus':
destCls = EucaImageManager
elif dest_type == 'openstack':
destCls = OSImageManager
orig_creds, dest_creds = self.get_credentials()
orig_creds = origCls._build_image_creds(orig_creds)
dest_creds = destCls._build_image_creds(dest_creds)
return (origCls, orig_creds, destCls, dest_creds)
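# Usage sketch (assumed pattern, mirroring fix_metadata above and
# get_euca_node_info below): the returned classes plus plain-dict credentials
# can be serialized for celery, and a manager is only instantiated inside the
# task that needs it, e.g.:
#   (origCls, orig_creds, destCls, dest_creds) = machine_request.prepare_manager()
#   dest_manager = destCls(**dest_creds)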
def _extract_file_location(self, download_dir):
id_owner = self.instance.created_by_identity
tenant_cred = id_owner.credential_set.filter(
key='ex_tenant_name')
if not tenant_cred:
tenant_cred = id_owner.credential_set.filter(
key='ex_project_name')
if not tenant_cred:
raise Exception("You should not be here! Update the key "
"used for openstack tenant names!")
tenant_cred = tenant_cred[0]
download_location = os.path.join(
download_dir, tenant_cred.value)
download_location = os.path.join(
download_location, '%s.qcow2' % self.new_application_name)
return download_location
def get_imaging_args(self, debug=False):
"""
Prepares the entire machine request for serialization to celery
"""
from chromogenic.drivers.openstack import \
ImageManager as OSImageManager
from chromogenic.drivers.eucalyptus import \
ImageManager as EucaImageManager
(orig_managerCls, orig_creds,
dest_managerCls, dest_creds) = self.prepare_manager()
download_dir = secrets.LOCAL_STORAGE
imaging_args = {
"visibility": self.new_application_visibility,
"instance_id": self.instance.provider_alias,
# NOTE: THERE IS AN ASSUMPTION MADE HERE!
# ASSUMPTION: the Creator's username == the LINUX username that was also created for them!
# FIXME if the ASSUMPTION above changes!
"created_by": self.instance.created_by.username,
"remove_image": True,
"remove_local_image": True,
"upload_image": True,
"image_name": self.new_application_name,
"timestamp": self.start_date,
"download_dir": download_dir
}
if debug:
# NOTE: use the `parent_image_id` value *OR* `instance_id` value
# If you are setting debug=True, you're calling from the REPL,
# and you are responsible for deciding
# which of those two values you would like to .pop()
# You should use the 'instance_id' field if
# you need to snapshot the instance first.
# You should use the 'parent_image_id' field if
# you want to debug a glance image
# Usually, this will contain the new_machine.identifier,
# but the instance's boot source may be required for debugging
imaging_args['parent_image_id'] = self.new_machine.identifier if self.new_machine else self.instance.source.identifier
imaging_args['upload_image'] = False # Set to False to keep Snapshot or parent_image_id in glance
imaging_args['remove_image'] = False # Set to False to keep Snapshot or parent_image_id in glance
imaging_args['remove_local_image'] = False # Set to False to keep downloaded file
# NOTE: If you run with the debug setup above,
# the *only* operation that will be completed
# is to *download* the instance/image
# and then *clean* the file.
# Set to False to skip the 'clean' portion and only download the instance/image.
# imaging_args['clean_image'] = False
if issubclass(orig_managerCls, OSImageManager):
download_location = self._extract_file_location(download_dir)
imaging_args['download_location'] = download_location
elif issubclass(orig_managerCls, EucaImageManager):
euca_args = self._prepare_euca_args()
imaging_args.update(euca_args)
orig_provider = self.parent_machine.provider
dest_provider = self.new_machine_provider
orig_platform = orig_provider.get_platform_name().lower()
dest_platform = dest_provider.get_platform_name().lower()
if orig_platform != dest_platform:
if orig_platform == 'kvm' and dest_platform == 'xen':
imaging_args['kvm_to_xen'] = True
elif orig_platform == 'xen' and dest_platform == 'kvm':
imaging_args['xen_to_kvm'] = True
return imaging_args
def _prepare_euca_args(self):
meta_name = self._get_meta_name()
public_image = self.is_public()
# Splits the string by ", " OR " " OR "\n" to create the list
private_users = self.parse_access_list()
exclude = self.get_exclude_files()
# Create image on image manager
(orig_managerCls, orig_creds,
dest_managerCls, dest_creds) = self.prepare_manager()
node_scp_info = self.get_euca_node_info(orig_managerCls, orig_creds)
return {
"public": public_image,
"private_user_list": private_users,
"exclude": exclude,
"meta_name": meta_name,
"node_scp_info": node_scp_info,
}
def get_euca_node_info(self, euca_managerCls, euca_creds):
node_dict = {
'hostname': '',
'port': '',
'private_key': ''
}
instance_id = self.instance.provider_alias
# Prepare and use the manager
euca_manager = euca_managerCls(**euca_creds)
node_ip = euca_manager.get_instance_node(instance_id)
# Find the matching node
try:
core_node = NodeController.objects.get(alias=node_ip)
node_dict['hostname'] = core_node.hostname
node_dict['port'] = core_node.port
node_dict['private_key'] = core_node.private_ssh_key
except NodeController.DoesNotExist:
logger.error("Must create a nodecontroller for IP: %s" % node_ip)
# Return a dict containing information on how to SCP to the node
return node_dict
def __unicode__(self):
return '%s Instance: %s Name: %s Status: %s (%s)'\
% (self.new_machine_owner, self.instance.provider_alias,
self.new_application_name, self.old_status, self.status)
class Meta:
db_table = "machine_request"
app_label = "core"
def _match_membership_to_access(access_list, membership):
"""
INPUT: user1,user2, user3 + user4,user5
OUTPUT: <User: 1>, ..., <User: 5>
"""
# Circular dependency -- do NOT move this import to module level. Possible future fix: move into Group?
from core.models.group import Group
if not access_list:
return membership.all()
# If using access list, parse the list
# into queries and evaluate the filter ONCE.
names_wanted = access_list.split(',')
query_list = map(lambda name: Q(name__iexact=name), names_wanted)
query_list = reduce(lambda qry1, qry2: qry1 | qry2, query_list)
members = Group.objects.filter(query_list)
return members | membership.all()
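# Illustrative sketch (hypothetical group names): the map/reduce above collapses a
# comma-separated access list into a single case-insensitive OR filter, so for
# access_list == "staff,devs" the evaluated query is equivalent to:
#   Group.objects.filter(Q(name__iexact='staff') | Q(name__iexact='devs'))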
def _create_new_application(machine_request, new_image_id, tags=[]):
new_provider = machine_request.new_machine_provider
user = machine_request.new_machine_owner
owner_ident = Identity.objects.get(created_by=user, provider=new_provider)
# This is a brand new app and a brand new providermachine
new_app = create_application(
new_provider.id,
new_image_id,
machine_request.new_application_name,
owner_ident,
# new_app.Private = False when machine_request.is_public = True
not machine_request.is_public(),
machine_request.new_machine_version,
machine_request.new_machine_description,
tags)
return new_app
def _update_parent_application(machine_request, new_image_id, tags=[]):
parent_app = machine_request.instance.source.providermachine.application
return _update_application(parent_app, machine_request, tags=tags)
def _update_application(application, machine_request, tags=[]):
if application.name != machine_request.new_application_name:
application.name = machine_request.new_application_name
if machine_request.new_machine_description:
application.description = machine_request.new_machine_description
application.private = not machine_request.is_public()
application.tags = tags
application.save()
return application
def _update_existing_machine(machine_request, application, provider_machine):
new_provider = machine_request.new_machine_provider
user = machine_request.new_machine_owner
owner_ident = Identity.objects.get(created_by=user, provider=new_provider)
provider_machine.application = application
provider_machine.version = machine_request.new_machine_version
provider_machine.created_by = user
provider_machine.created_by_identity = owner_ident
provider_machine.save()
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import datetime
import time
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
from .import util
from .util import print_msg, format_satoshis, print_stderr
from .import bitcoin
from .bitcoin import is_address, hash_160, COIN, TYPE_ADDRESS
from .transaction import Transaction
from .import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .import contacts
from .plugins import run_hook
known_commands = {}
def satoshis(amount):
# satoshi conversion must not be performed by the parser
return int(COIN*Decimal(amount)) if amount not in ['!', None] else amount
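# Illustrative examples (assuming COIN == 100000000 satoshis per BTC):
#   satoshis('0.001') == 100000
#   satoshis('!') == '!'    # "send max" marker passes through untouched
#   satoshis(None) is None  # an unset fee stays unset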
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.__code__.co_varnames[1:func.__code__.co_argcount]
self.defaults = func.__defaults__
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
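# Illustrative sketch (hypothetical command): for a function declared as
#   @command('wp')
#   def foo(self, address, fee=None, password=None): ...
# the resulting Command has requires_wallet == requires_password == True,
# params == ['address'] and options == ['fee', 'password'], because the trailing
# argument names that carry defaults are treated as options.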
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
c = known_commands[func.__name__]
wallet = args[0].wallet
password = kwargs.get('password')
if c.requires_wallet and wallet is None:
raise BaseException("wallet not loaded. Use 'electrum daemon load_wallet'")
if c.requires_password and password is None and wallet.storage.get('use_encryption'):
return {'error': 'Password required' }
return func(*args, **kwargs)
return func_wrapper
return decorator
class Commands:
def __init__(self, config, wallet, network, callback = None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
def _run(self, method, args, password_getter):
# this wrapper is called from the python console
cmd = known_commands[method]
if cmd.requires_password and self.wallet.has_password():
password = password_getter()
if password is None:
return
else:
password = None
f = getattr(self, method)
if cmd.requires_password:
result = f(*args, **{'password':password})
else:
result = f(*args)
if self._callback:
self._callback()
return result
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self):
"""Create a new wallet"""
raise BaseException('Not a JSON-RPC command')
@command('wn')
def restore(self, text):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys. If you want to be prompted for your
seed, type '?' or ':' (concealed) """
raise BaseException('Not a JSON-RPC command')
@command('wp')
def password(self, password=None, new_password=None):
"""Change wallet password. """
self.wallet.update_password(password, new_password)
self.wallet.storage.write()
return {'password':self.wallet.has_password()}
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
try:
value = ast.literal_eval(value)
except:
pass
self.config.set_key(key, value)
return True
@command('')
def make_seed(self, nbits=132, entropy=1, language=None, segwit=False):
"""Create a seed"""
from .mnemonic import Mnemonic
t = 'segwit' if segwit else 'standard'
s = Mnemonic(language).make_seed(t, nbits, custom_entropy=entropy)
return s
@command('')
def check_seed(self, seed, entropy=1, language=None):
"""Check that a seed was generated with given entropy"""
from .mnemonic import Mnemonic
return Mnemonic(language).check_seed(seed, entropy)
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.get_history', [address]))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = copy.deepcopy(self.wallet.get_utxos(exclude_frozen=False))
for i in l:
v = i["value"]
i["value"] = float(v)/COIN if v is not None else None
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.listunspent', [address]))
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs.
Inputs must have a redeemPubkey.
Outputs must be a list of {'address':address, 'value':satoshi_amount}.
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('locktime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
sec = txin.get('privkey')
if sec:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
keypairs[pubkey] = privkey, compressed
txin['type'] = txin_type
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
outputs = [(TYPE_ADDRESS, x['address'], int(x['value'])) for x in outputs]
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.as_dict()
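# Illustrative input sketch (hypothetical placeholder values):
#   jsontx = {
#       "inputs":  [{"output": "<prevout txid>:0", "privkey": "<WIF private key>"}],
#       "outputs": [{"address": "<destination address>", "value": 100000}],
#       "locktime": 0
#   }
# Each input's "output" field is split into prevout_hash / prevout_n above, and
# "value" is expressed in satoshis.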
@command('wp')
def signtransaction(self, tx, privkey=None, password=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx)
if privkey:
txin_type, privkey2, compressed = bitcoin.deserialize_privkey(privkey)
pubkey = bitcoin.public_key_from_private_key(privkey2, compressed)
h160 = bitcoin.hash_160(bfh(pubkey))
x_pubkey = 'fd' + bh2u(b'\x00' + h160)
tx.sign({x_pubkey:(privkey2, compressed)})
else:
self.wallet.sign_transaction(tx, password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return tx.deserialize()
@command('n')
def broadcast(self, tx, timeout=30):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
return self.network.broadcast(tx, timeout)
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = Transaction.multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(bfh(redeem_script)))
return {'address':address, 'redeemScript':redeem_script}
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return self.wallet.set_frozen_state([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return self.wallet.set_frozen_state([address], False)
@command('wp')
def getprivatekeys(self, address, password=None):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if is_address(address):
return self.wallet.export_private_key(address, password)[0]
domain = address
return [self.wallet.export_private_key(address, password)[0] for address in domain]
@command('w')
def ismine(self, address):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return self.wallet.is_mine(address)
@command('')
def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum listaddresses | electrum getprivatekeys - '"
@command('')
def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
def getpubkeys(self, address):
"""Return the public keys for a wallet address. """
return self.wallet.get_public_keys(address)
@command('w')
def getbalance(self):
"""Return the balance of your wallet. """
c, u, x = self.wallet.get_balance()
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
return out
@command('n')
def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
server query, results are not checked by SPV.
"""
out = self.network.synchronous_get(('blockchain.address.get_balance', [address]))
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
def getproof(self, address):
"""Get Merkle branch of an address in the UTXO set"""
p = self.network.synchronous_get(('blockchain.address.get_proof', [address]))
out = []
for i,s in p:
out.append(i)
return out
@command('n')
def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return self.network.synchronous_get(('blockchain.transaction.get_merkle', [txid, int(height)]))
@command('n')
def getservers(self):
"""Return the list of available servers"""
return self.network.get_servers()
@command('')
def version(self):
"""Return the version of electrum."""
from .version import ELECTRUM_VERSION
return ELECTRUM_VERSION
@command('w')
def getmpk(self):
"""Get master public key. Return your wallet\'s master public key"""
return self.wallet.get_master_public_key()
@command('wp')
def getmasterprivate(self, password=None):
"""Get master private key. Return your wallet\'s master private key"""
return str(self.wallet.keystore.get_master_private_key(password))
@command('wp')
def getseed(self, password=None):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_seed(password)
return s
@command('wp')
def importprivkey(self, privkey, password=None):
"""Import a private key."""
if not self.wallet.can_import_privkey():
return "Error: This type of wallet cannot import private keys. Try to create a new wallet with that key."
try:
addr = self.wallet.import_private_key(privkey, password)
out = "Keypair imported: " + addr
except BaseException as e:
out = "Error: " + str(e)
return out
def _resolver(self, x):
if x is None:
return None
out = self.wallet.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise BaseException('cannot verify alias', x)
return out['address']
@command('nw')
def sweep(self, privkey, destination, fee=None, nocheck=False, imax=100):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
tx_fee = satoshis(fee)
privkeys = privkey.split()
self.nocheck = nocheck
dest = self._resolver(destination)
tx = self.wallet.sweep(privkeys, self.network, self.config, dest, tx_fee, imax)
return tx.as_dict() if tx else None
@command('wp')
def signmessage(self, address, message, password=None):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = self.wallet.sign_message(address, message, password)
return base64.b64encode(sig)
@command('')
def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
return bitcoin.verify_message(address, sig, message)
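# Round-trip sketch (hypothetical address/message; assumes the address belongs to
# this wallet):
#   sig = commands.signmessage(addr, "hello", password=pw)  # base64-encoded signature
#   commands.verifymessage(addr, sig, "hello")              # -> True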
def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime=None):
self.nocheck = nocheck
change_addr = self._resolver(change_addr)
domain = None if domain is None else map(self._resolver, domain)
final_outputs = []
for address, amount in outputs:
address = self._resolver(address)
amount = satoshis(amount)
final_outputs.append((TYPE_ADDRESS, address, amount))
coins = self.wallet.get_spendable_coins(domain, self.config)
tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
if locktime is not None:
tx.locktime = locktime
if rbf:
tx.set_rbf(True)
if not unsigned:
run_hook('sign_tx', self.wallet, tx)
self.wallet.sign_transaction(tx, password)
return tx
@command('wp')
def payto(self, destination, amount, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False, password=None, locktime=None):
"""Create a transaction. """
tx_fee = satoshis(fee)
domain = from_addr.split(',') if from_addr else None
tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('wp')
def paytomany(self, outputs, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False, password=None, locktime=None):
"""Create a multi-output transaction. """
tx_fee = satoshis(fee)
domain = from_addr.split(',') if from_addr else None
tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('w')
def history(self):
"""Wallet history. Returns the transaction history of your wallet."""
balance = 0
out = []
for item in self.wallet.get_history():
tx_hash, height, conf, timestamp, value, balance = item
if timestamp:
date = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
date = "----"
label = self.wallet.get_label(tx_hash)
tx = self.wallet.transactions.get(tx_hash)
tx.deserialize()
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr == None: continue
if addr == "(pubkey)":
prevout_hash = x.get('prevout_hash')
prevout_n = x.get('prevout_n')
_addr = self.wallet.find_pay_to_pubkey_address(prevout_hash, prevout_n)
if _addr:
addr = _addr
input_addresses.append(addr)
for addr, v in tx.get_outputs():
output_addresses.append(addr)
out.append({
'txid': tx_hash,
'timestamp': timestamp,
'date': date,
'input_addresses': input_addresses,
'output_addresses': output_addresses,
'label': label,
'value': float(value)/COIN if value is not None else None,
'height': height,
'confirmations': conf
})
return out
@command('w')
def setlabel(self, key, label):
"""Assign a label to an item. Item may be a bitcoin address or a
transaction ID"""
self.wallet.set_label(key, label)
@command('w')
def listcontacts(self):
"""Show your list of contacts"""
return self.wallet.contacts
@command('w')
def getalias(self, key):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return self.wallet.contacts.resolve(key)
@command('w')
def searchcontacts(self, query):
"""Search through contacts, return matching entries. """
results = {}
for key, value in self.wallet.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
def listaddresses(self, receiving=False, change=False, labels=False, frozen=False, unused=False, funded=False, balance=False):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in self.wallet.get_addresses():
if frozen and not self.wallet.is_frozen(addr):
continue
if receiving and self.wallet.is_change(addr):
continue
if change and not self.wallet.is_change(addr):
continue
if unused and self.wallet.is_used(addr):
continue
if funded and self.wallet.is_empty(addr):
continue
item = addr
if labels or balance:
item = (item,)
if balance:
item += (format_satoshis(sum(self.wallet.get_addr_balance(addr))),)
if labels:
item += (repr(self.wallet.labels.get(addr, '')),)
out.append(item)
return out
@command('n')
def gettransaction(self, txid):
"""Retrieve a transaction. """
if self.wallet and txid in self.wallet.transactions:
tx = self.wallet.transactions[txid]
else:
raw = self.network.synchronous_get(('blockchain.transaction.get', [txid]))
if raw:
tx = Transaction(raw)
else:
raise BaseException("Unknown transaction")
return tx.as_dict()
@command('')
def encrypt(self, pubkey, message):
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
return bitcoin.encrypt_message(message, pubkey)
@command('wp')
def decrypt(self, pubkey, encrypted, password=None):
"""Decrypt a message encrypted with a public key."""
return self.wallet.decrypt_message(pubkey, encrypted, password)
def _format_request(self, out):
pr_str = {
PR_UNKNOWN: 'Unknown',
PR_UNPAID: 'Pending',
PR_PAID: 'Paid',
PR_EXPIRED: 'Expired',
}
out['amount (BTC)'] = format_satoshis(out.get('amount'))
out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
return out
@command('w')
def getrequest(self, key):
"""Return a payment request"""
r = self.wallet.get_payment_request(key, self.config)
if not r:
raise BaseException("Request not found")
return self._format_request(r)
#@command('w')
#def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
def listrequests(self, pending=False, expired=False, paid=False):
"""List the payment requests you made."""
out = self.wallet.get_sorted_requests(self.config)
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
if f is not None:
out = list(filter(lambda x: x.get('status')==f, out))
return list(map(self._format_request, out))
@command('w')
def createnewaddress(self):
"""Create a new receiving address, beyond the gap limit of the wallet"""
return self.wallet.create_new_address(False)
@command('w')
def getunusedaddress(self):
"""Returns the first unused address of the wallet, or None if all addresses are used.
An address is considered as used if it has received a transaction, or if it is used in a payment request."""
return self.wallet.get_unused_address()
@command('w')
def addrequest(self, amount, memo='', expiration=None, force=False):
"""Create a payment request, using the first unused address of the wallet.
The address will be considered as used after this operation.
If no payment is received, the address will become unused again once the payment request is deleted from the wallet."""
addr = self.wallet.get_unused_address()
if addr is None:
if force:
addr = self.wallet.create_new_address(False)
else:
return False
amount = satoshis(amount)
expiration = int(expiration) if expiration else None
req = self.wallet.make_payment_request(addr, amount, memo, expiration)
self.wallet.add_payment_request(req, self.config)
out = self.wallet.get_payment_request(addr, self.config)
return self._format_request(out)
@command('wp')
def signrequest(self, address, password=None):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise BaseException('No alias in your configuration')
alias_addr = self.wallet.contacts.resolve(alias)['address']
self.wallet.sign_payment_request(address, alias, alias_addr, password)
@command('w')
def rmrequest(self, address):
"""Remove a payment request"""
return self.wallet.remove_payment_request(address, self.config)
@command('w')
def clearrequests(self):
"""Remove all payment requests"""
for k in self.wallet.receive_requests.keys():
self.wallet.remove_payment_request(k, self.config)
@command('n')
def notify(self, address, URL):
"""Watch an address. Everytime the address changes, a http POST is sent to the URL."""
def callback(x):
import urllib2
headers = {'content-type':'application/json'}
data = {'address':address, 'status':x.get('result')}
try:
req = urllib2.Request(URL, json.dumps(data), headers)
response_stream = urllib2.urlopen(req, timeout=5)
util.print_error('Got Response for %s' % address)
except BaseException as e:
util.print_error(str(e))
self.network.send([('blockchain.address.subscribe', [address])], callback)
return True
@command('wn')
def is_synchronized(self):
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
@command('')
def help(self):
# for the python console
return sorted(known_commands.keys())
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Bitcoin address, contact or alias',
'address': 'Bitcoin address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in BTC). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in BTC).',
'outputs': 'list of ["address", amount]',
'redeem_script': 'redeem script (hexadecimal)',
}
command_options = {
'password': ("-W", "Password"),
'new_password':(None, "New Password"),
'receiving': (None, "Show only receiving addresses"),
'change': (None, "Show only change addresses"),
'frozen': (None, "Show only frozen addresses"),
'unused': (None, "Show only unused addresses"),
'funded': (None, "Show only funded addresses"),
'balance': ("-b", "Show the balances of listed addresses"),
'labels': ("-l", "Show the labels of listed addresses"),
'nocheck': (None, "Do not verify aliases"),
'imax': (None, "Maximum number of inputs"),
'fee': ("-f", "Transaction fee (in BTC)"),
'from_addr': ("-F", "Source address. If it isn't in the wallet, it will ask for the private key unless supplied in the format public_key:private_key. It's not saved in the wallet."),
'change_addr': ("-c", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "Number of bits of entropy"),
'entropy': (None, "Custom entropy"),
'segwit': (None, "Create segwit seed"),
'language': ("-L", "Default language for wordlist"),
'privkey': (None, "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "Do not sign transaction"),
'rbf': (None, "Replace-by-fee transaction"),
'locktime': (None, "Set locktime block number"),
'domain': ("-D", "List of addresses"),
'memo': ("-m", "Description of the request"),
'expiration': (None, "Time in seconds"),
'timeout': (None, "Timeout in seconds"),
'force': (None, "Create new address beyond gap limit, if no more addresses are available."),
'pending': (None, "Show only pending requests."),
'expired': (None, "Show only expired requests."),
'paid': (None, "Show only paid requests."),
}
# don't use floats because of rounding errors
from .transaction import tx_from_str
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
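# Illustrative example: parse_float keeps amounts as exact decimal strings rather
# than binary floats, e.g. json_loads('[0.1, 0.015]') == ['0.1', '0.015'].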
arg_types = {
'num': int,
'nbits': int,
'imax': int,
'entropy': int,
'tx': tx_from_str,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'fee': lambda x: str(Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x != '!' else '!',
'locktime': int,
}
config_variables = {
'addrequest': {
'requests_dir': 'directory where a bip70 file will be written.',
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
# workaround https://bugs.python.org/issue23058
# see https://github.com/nickstenning/honcho/pull/121
def subparser_call(self, parser, namespace, values, option_string=None):
from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
argparse._SubParsersAction.__call__ = subparser_call
def add_network_options(parser):
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=False, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port], where type is socks4,socks5 or http")
def add_global_options(parser):
group = parser.add_argument_group('global options')
group.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Show debugging information")
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum_data' directory")
group.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
group.add_argument("--testnet", action="store_true", dest="testnet", default=False, help="Use Testnet")
def get_parser():
# create main parser
parser = argparse.ArgumentParser(
epilog="Run 'electrum help <command>' to see the help for a command")
add_global_options(parser)
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="bitcoin URI (or bip70 file)")
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
add_network_options(parser_gui)
add_global_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', help="Run Daemon")
parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop', 'load_wallet', 'close_wallet'], nargs='?')
#parser_daemon.set_defaults(func=run_daemon)
add_network_options(parser_daemon)
add_global_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
add_global_options(p)
if cmdname == 'restore':
p.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
for optname, default in zip(cmd.options, cmd.defaults):
a, help = command_options[optname]
b = '--' + optname
action = "store_true" if type(default) is bool else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
for param in cmd.params:
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
|
|
from lib import *
#from keras.layers.merge import Concatenate
from keras.layers import Merge
import copy
from collections import Counter
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import math
word2topic = pickle.load(open("word2topic", "r"))
embedding = pickle.load(open("word2topic", "r"))
#embedding = pickle.load(open("embedding", "r"))
vocab = word2topic.keys()
max_words = 25#30
depth_embed = 100#370
depth_distance = 100#368#70#100
##def getEmbedding_word2vec(sentence, model):
#
# global max_words, depth, no_features, train_length
## model = model[0]
# list = np.array([])
# for word in sentence:
# if word in model.wv.vocab:
# list = np.append(list, model.wv[word])
#
# #print list.size
# if(list.size > depth*max_words):
# list = list[0:depth*max_words]
# #print sentence
# pad = np.zeros(depth*max_words - list.size)
# list = np.append(list, pad)
# #print list.shape
# return list
def get_topic_rep(topic, word2topic, word2vec):
global vocab
topics = str(topic).split(' ')
v = np.zeros(np.concatenate((word2topic['donald'], word2vec.wv['donald'])).shape)
counter = 0
# if topics[0] in vocab:
# v = np.append(v, word#2topic[topics[0]])
## counter = 0
## if
for counter in range(len(topics)):
if topics[counter] in vocab:
# print topics[counter]
try:
v += np.concatenate((word2topic[topics[counter]], word2vec.wv[topics[counter]]))
except:
v += np.concatenate((word2topic[topics[counter]], np.zeros(word2vec.wv['donald'].shape)))
# print counter + 1
v /= (counter + 1) * 1.0
# print type(v)
return v
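# Sketch of the representation built above (hypothetical topic string): for
# topic == "donald trump", v is the element-wise mean over the topic words of
# concatenate(word2topic[word], word2vec.wv[word]), so len(v) equals
# len(word2topic['donald']) + word2vec.vector_size; a word missing from the
# word2vec vocabulary contributes zeros for the word2vec half.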
def custom_loss(y_true, y_pred):
y = K.argmax(y_true, axis=1)
print y[0:5]
## y_true = np.array(y_true).astype('int64')
##
print y_true[0:5]
## length = y_true.get_shape()
## l = tuple([length[i].value for i in range(0, len(length))])[0]
# for i in range(y_pred.get_shape()[0].value):
# y_pred[i] = y_pred[i][y[i]]
#
# y_pred = K.log(y_pred[:, K.constant(y, dtype='int64')])
return K.mean(K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 0)))[0], :], y_true[np.where(K.eval(K.equal(y, 0)))[0], :]), K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 1)))[0], :], y_true[np.where(K.eval(K.equal(y, 1)))[0], :]), K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 2)))[0], :], y_true[np.where(K.eval(K.equal(y, 2)))[0], :]))
# return K.sum(K.mean(K.dot(K.equal(y, 0), y_pred)), K.mean(K.dot(K.equal(y, 1), y_pred)), K.mean(K.dot(K.equal(y, 2), y_pred)))
def evaluate(y_test, thresholded_pred):
print "accuracy", (sum(abs(y_test == thresholded_pred))) / float(len(thresholded_pred))
print Counter(y_test)
print Counter(thresholded_pred)
print confusion_matrix(y_test, thresholded_pred)
print "f1 is", f1_score(y_test, thresholded_pred, average='macro')
def distance_embed(sentence):
global max_words, depth_distance, word2topic
list = np.array([])
for word in sentence:
if word in vocab:
list = np.append(list, word2topic[word])
#print list.size
if(list.size > max_words * depth_distance):
list = list[0:max_words * depth_distance]
#print sentence
pad = np.zeros(max_words * depth_distance - list.size)
list = np.append(list, pad)
#print list.shape
return list
def getEmbedding(sentence, model):
global max_words, depth_embed, embedding#, depth_distance
list = np.array([])
for word in sentence:
if word in vocab:
try:
list = np.append(list, model[word])
# print "found", word
except:
list = np.append(list, np.zeros(model['donald'].shape))
# print word
#print list.size
if(list.size > max_words * depth_embed):
list = list[0:max_words * depth_embed]
#print sentence
pad = np.zeros(max_words * depth_embed - list.size)
list = np.append(list, pad)
#print list.shape
return list
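# Shape sketch: distance_embed() and getEmbedding() both return a flat vector of
# fixed length (max_words * depth), truncating longer tweets and zero-padding
# shorter ones, so the getEmbedding() vectors can later be reshaped to
# (max_words, depth_embed) for the LSTM inputs (see the np.reshape calls in
# run_model below).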
#def getPOS(sentence):
#
# global max_words#, depth
# all_tags = CMUTweetTagger.runtagger_parse(sentence)
# list = np.array([])
# for i in range(len(sentence)):
# if sentence[i] in vocab:
# list = np.append(list, all_tags[i])
#
# #print list.size
# if(list.size > max_words):
# list = list[0:max_words]
# #print sentence
# pad = np.zeros(max_words - list.size)
# list = np.append(list, pad)
# #print list.shape
# return list
#def getEmbedding(sentence):
# global word2topic, vocab
# max_words = 30
# list = []#np.array([])
# for word in sentence:
# if word in vocab:
## list = np.append(list, word2topic[word])
# list.append(word2topic[word])
## list = np.array(list)
# #print list.size
# if(len(list) > max_words):
# list = list[0:max_words]
# #print sentence
# pad = [0] * 100# * (max_words - len(list))#np.zeros(max_words - list.size)
# for i in range((max_words - len(list))):
# list.append(pad)
## list.append(pad)
# #print list.shape
# return list
#getEmbedding(df['tokenized_sents'][0])
def run_model():
global tech, politics, sports, music, genre, max_words, depth_embed, depth_distance, word2topic, vocab, K
# with K.tf.device('/gpu:1'):
gpu_options = K.tf.GPUOptions(per_process_gpu_memory_fraction=1.0)#0.8)#0.2)
sess = K.tf.Session(config=K.tf.ConfigProto(gpu_options=gpu_options))
# all_topics = np.concatenate((tech, politics, music, sports))
# print "AAAAAAAAAAAAAAAAAAAAA"
# print len(all_topics)
# print all_topics
try:
[X, y, df, d] = pickle.load(open("data_rnn", "r"))
print d
# df = df[df["topic"].isin(all_topics)]
except:
#filename = "Homework2_data.csv"
# word2topic = pickle.load(open("word2topic", "r"))
df = readData(filename1, filename2)
#df = df[df["topic"].isin(all_topics)]
df['sentiment'] = pd.to_numeric(df['sentiment'])
# topics_array = np.array(([tech, politics, music, sports]))
# print genre
# for index, row in df.iterrows():
# tweet_topic = row['topic']
# # print "tweet_topic", tweet_topic
# for i in range(len(topics_array)):
# if tweet_topic in topics_array[i]:
# # print "ta", topics_array[i]
# # df["topic"][index] = genre[i]
# df.ix[index, 'topic'] = genre[i]
# # print "df", df["topic"][index]
# break
# Remove topics of no interest
print "length of df is", len(df)
# print "from joined data\n", Counter(list(df["user_id"])).most_common(50)
indices = []
# df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl).apply(removeMention).apply(removeTrailingHash);
# df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl).apply(removeTrailingHash);
df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl)#.apply(removeTrailingHash);
df['tweet'] = tokenize_and_stopwords(df['tweet'])
# df = df.sample(frac=1).reset_index(drop=True)
# df = shuffle(df)
print df.size
df['tokenized_sents'] = df.apply(lambda row: nltk.word_tokenize(row['tweet']), axis=1)
try:
word2vec = wv.Word2Vec.load("word2vec")
#model.similarity("this", "is")
# model.init_sims(replace=True)
print "loaded"
except:
word2vec = wv.Word2Vec(df["tokenized_sents"], size=depth_embed, window=5, min_count=5, workers=4)
word2vec.save("word2vec")
#X.shape[0]#7349
df['embedding'] = df['tokenized_sents'].apply(getEmbedding, args=(word2topic,))
df['word2vec'] = df['tokenized_sents'].apply(getEmbedding, args=(word2vec.wv,))
X = list(df['embedding'])
X_w = list(df['word2vec'])
X = np.reshape(np.ravel(X), (len(X), max_words, depth_embed))
X_w = np.reshape(np.ravel(X_w), (len(X_w), max_words, depth_embed))
# a = copy.deepcopy(X)#np.array(df['embedding'])
df['tweet_rep'] = df['tokenized_sents'].apply(distance_embed)
#### a = list(df['tweet_rep'])
#### a = np.reshape(np.ravel(a), (len(a), max_words, depth_distance))
df['topic_rep'] = df['topic'].apply(get_topic_rep, args=(word2topic, word2vec,))
d = []
# a = np.reshape(a, ())
#### b = list(df['topic_rep'])
#### print b[0]
# print b
# print b.shape
#### b = np.reshape(np.ravel(np.ravel(b)), (X.shape[0], 1, depth_distance))
##### c = (a - b)**2
###### d = c
##### for i1 in range(len(c)):
##### for j1 in range(len(c[0])):
##### d.append(abs(sum(c[i1][j1])))
##### d = np.array(d)
##### d = np.reshape(d, (len(a), max_words))
##### d[d==0] = 0.1
##### d = 1.0 / d
##### print "d[0] is !!!", d[0]
# df['distance'] = d#1.0 / d#sum(sum(sum(abs(np.array(df['embedding']) - np.array(df['topic_rep'])))))
# one_hot =
# df['pos'] = df['tweet'].apply(getPOS)
# X = np.column_stack((np.array(df['embedding']), np.array(df['pos'])))
# for i in range(len(X)):
# X[i] = X[i][0:]
# B = np.array([])
# np.dstack((X, B)).shape
# y = np.array(df['sentiment'])
y = np.array(pd.get_dummies(df['sentiment']))
### No dumping
# try:
# pickle.dump([X, y, df, d], open("data_rnn", "wb"))
# except:
# "dumping data failed"
print len(X[0])
print len(X)
X_train = X[0:13000]
X_test = X[13000:]
X_train_w = X_w[0:13000]
X_test_w = X_w[13000:]
y_train = y[0:13000]
y_test = y[13000:]
print " Y train!!\n", y_train[0:5]
print list(df['sentiment'])[0:5]
print y_test[0:5]
## LOAD MODEL
try:
model = load_model('modelc_rnn5')
one_hot = list(df['topic_rep'])#(pd.get_dummies(df['topic']))
one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), 1, 2*depth_distance))
except:
# Word model
model_word = Sequential()
model_word.add(Bidirectional(LSTM(3 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, depth_embed)))
model_word.add(Dropout(0.2))
model_word.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))
model_word.add(Dropout(0.2))
model_word_w = Sequential()
model_word_w.add(Bidirectional(LSTM(3 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, depth_embed)))
model_word_w.add(Dropout(0.2))
model_word_w.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))
model_word_w.add(Dropout(0.2))
# model_word.add(Bidirectional(LSTM(max_words, return_sequences=True)))
# model_word.add(Dropout(0.2))
# model_word.add(Flatten())
# model_word.add(MaxPooling2D(pool_size=(2, 1)))
# model_word.add(Dropout(0.2))
# model_word.add(Dense((max_words), activation="tanh"))
## Reverse
# model_word_r = Sequential()
# model_word_r.add(LSTM(max_words, input_shape=(max_words, depth), consume_less='gpu', go_backwards=True))
# model_word_r.add(Dropout(0.2))
## model_word_r.add(LSTM(max_words, input_shape=(max_words, depth), consume_less='gpu', go_backwards=True))
# Topic model
print len(set(df['topic']))
print "set is", set(df['topic'])
# print "topic rep!! \n", df['topic_rep']
one_hot = list(df['topic_rep'])#(pd.get_dummies(df['topic']))
# print df['topic'][0:5]
print "init one hot", one_hot[0:2]
# one_hot = one_hot.as_matrix()
# one_hot = d#df['distance']
print len(one_hot)
# print len(one_hot[0])
# print one_hot[0]
## one_hot = np.reshape(one_hot, (one_hot.shape[0], max_words, 1))
# one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), depth_distance, 1))
one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), 1, 2*depth_distance))
one_hot_train = one_hot[0:13000]
one_hot_test = one_hot[13000:]
print "one hot shape", one_hot.shape
model_topic = Sequential()
# , return_sequences=True
model_topic.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True), input_shape=(1, 2*depth_distance)))
model_topic.add(Dropout(0.2))
# model_topic.add(Bidirectional(LSTM(max_words, return_sequences=True)))
# model_topic.add(Flatten())
# model_topic.add(MaxPooling2D(pool_size=(2, 1)))
# model_topic.add(Dropout(0.2))
# model_topic.add(Dense(4, activation="tanh"))
# model_topic.add(Dropout(0.2))
# Merge forward and backward
# merged = Merge([model_word_f, model_word_r], mode='concat')#, concat_axis=1)
# model_word = Sequential()
# model_word.add(merged)
# model_word.add(Dropout(0.2))
## model_word.add(MaxPooling2D(pool_size=(2, 1)))
## model_word.add(Dropout(0.2))
# model_word.add(LSTM(max_words, input_shape=(2*max_words, 1)))
# model_word.add(Dropout(0.2))
# Merge merged and topic info
merged2 = Merge([model_word, model_word_w, model_topic], mode='concat', concat_axis=1)
# merged = Concatenate([model_word, model_topic], axis=-1)
model = Sequential()
model.add(merged2)
model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(2*max_words, activation='relu', return_sequences=True)))#)))
model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(2*max_words, activation='tanh', return_sequences=True)))
## # model.add(Flatten())
model.add(Dropout(0.2))
# model.add(Bidirectional(LSTM(max_words), input_shape=(4 + max_words, 1)))
print "added additional Dense, no flatten"
## model.add(Dense(max_words, activation='tanh'))
# model.add(Dropout(0.2))
#model.add(Dense(1, activation='linear', W_constraint=maxnorm(3)))
# model.add(Bidirectional(LSTM(2*max_words, activation='tanh', return_sequences=True)))#)))
# model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))#)))
model.add(Dropout(0.2))
model.add(LSTM(5, activation="softmax"))
# model.add(LSTM(1, activation="linear"))
# optimizer = RMSprop(lr=0.01)
# model.compile(loss='categorical_crossentropy', optimizer=optimizer)
adam = Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=adam)
print "Custom!!!"
# model.compile(loss=custom_loss, optimizer=adam)
print "came here saaaaar!!!!!!\n\n"
# print X[0:5]
# print Y_train[0:5]
print "model changedd !!!"
model.fit([X_train, X_train_w, one_hot_train], y_train, batch_size = 64, epochs=60, validation_split = 0.05, callbacks=[history])
model_json = model.to_json()
with open("modelc_rnn5.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("modelc_rnn5.h5")
print("Saved model to disk")
# print(history.History)
return [model, X, X_w, y, df, d, one_hot]
# print X.shape
# print X[0]
# print X[0]
# for i in X[0]:
# print i
def load_model(filename):
json_file = open(filename+ '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights(filename + ".h5")
# [X, y, df, d] = pickle.load(open("data_rnn", "r"))
return model#, X, y, df, d]
def duplicate_model(filename):
global tech, politics, sports, music, genre, max_words, depth, word2topic, vocab, K
print "Duplicating!!"
# Word model
model_word = Sequential()
model_word.add(Bidirectional(LSTM(max_words, return_sequences=True), input_shape=(max_words, depth)))
model_word.add(Dropout(0.2))
# model_word.add(Flatten())
# model_word.add(MaxPooling2D(pool_size=(2, 1)))
# model_word.add(Dropout(0.2))
model_topic = Sequential()
model_topic.add(Bidirectional(LSTM(max_words, return_sequences=True), input_shape=(max_words, 1)))
model_topic.add(Dropout(0.2))
# model_topic.add(Flatten())
# model_topic.add(MaxPooling2D(pool_size=(2, 1)))
# model_topic.add(Dropout(0.2))
merged2 = Merge([model_word, model_topic], mode='concat')
model = Sequential()
model.add(merged2)
model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(max_words, return_sequences=True)))
model.add(Dropout(0.2))
model.add(LSTM(max_words))
# model.add(Flatten())
model.add(Dropout(0.2))
# merged = Concatenate([model_word, model_topic], axis=-1)
model = Sequential()
model.add(merged2)
model.add(Dropout(0.2))
model.add(LSTM(max_words))
# model.add(Dropout(0.2))
# print "added additional Dense, no flatten"
# model.add(Dense(1, activation='linear', W_constraint=maxnorm(5)))
json_file = open(filename+ '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model_true = model_from_json(loaded_model_json)
# load weights into new model
model_true.load_weights(filename + ".h5")
model.layers[0].set_weights(model_true.layers[0].get_weights())
model.layers[1].set_weights(model_true.layers[1].get_weights())
model.layers[2].set_weights(model_true.layers[2].get_weights())
model.layers[3].set_weights(model_true.layers[3].get_weights())
try:
model.layers[3].set_weights(model_true.layers[3].get_weights())
# model.layers[3].set_weights(model_true.layers[3].get_weights())
print "tried"
except:
print "excepted"
# model.add(Dropout(0.2))
# model.layers[3].set_weights(model_true.layers[3].get_weights())
return model
#equal weighted categorical cross entropy
def sentiment_classifier():
global max_words, depth_distance, depth_embed
print "in senti class, changes, class\n\n"
try:
assert False
print "in try\n\n"
[model, X, y, df, d] = load_model('modelc_rnn5')
print "Data found"
print "done"
except Exception, e:
print "Caught an exception\n\n"
print "Error is", str(e), "\n\n"
[model, X, X_w, y, df, d, one_hot] = run_model()
print "length of X is", len(X)
X_test = X[13000:]
y_test = y[13000:]
X_test_w = X_w[13000:]
X_train_w = X_w[0:13000]
X_train = X[0:13000]
y_train = y[0:13000]
topics = list(df['topic'])
# ____________________________________________________________________________________________________________HERE_________________
# one_hot = d#df['distance']
# one_hot = pd.get_dummies(df['topic'])
# one_hot = one_hot.as_matrix()
# print len(set(df['topic']))
# print "set is", set(df['topic'])
# print len(one_hot)
# print len(one_hot[0])
# print one_hot[0]
## print len(all_topics)
## print all_topics
# print set(df["topic"])
# one_hot = np.array(df['topic_rep'])#np.array(pd.get_dummies(df['topic']))
# one_hot = np.reshape(one_hot, (X.shape[0], 1, depth_distance))
one_hot_train = one_hot[0:13000]
one_hot_test = one_hot[13000:]
pred = model.predict([X_test, X_test_w, one_hot_test], batch_size = 64)#, Y_train, batch_size=32, verbose=1, sample_weight=None)
print pred[0:5]
print y_test[0:5]
# pred[:, 0] *= 1.5
# margin = 0.06
# indexes = pred[:, 0] + margin >= pred[:, 1]
# print indexes
# pred[indexes, 0] = pred[indexes, 1] + 0.01
# print pred[0:5]
##### print "This is the prediction"
###### y[y >= 0.1] = 1
###### y[y < 0.1] = 0
##### pred.shape = (pred.shape[0],)
##### print pred[0:20]
##### print "true labels"
##### print y_test[0:20]
###### print sum(sum(y == Y_train))
###### print (len(X_train) * len(X_train[0]))
##### print (sum(abs(y_test - pred))) / float(len(pred))
##### thresh1 = 1.5#49#1.8#1.5
##### thresh2 = 3.9
##### thresholded_pred = copy.deepcopy(pred)
##### thresholded_pred[(pred > (-thresh1 + 0.0)) & (pred < thresh2)] = 0
##### thresholded_pred[(pred >= thresh1) & (pred < thresh2)] = 3#1
##### thresholded_pred[pred >= thresh2] = 5#2
##### thresholded_pred[(pred > -thresh2) & (pred <= (-thresh1 + 0.0))] = -3#1
##### thresholded_pred[pred <= -thresh2] = -5#2
##### thresholded_pred = thresholded_pred.astype('int8')
##### print "Testing"
##### evaluate(y_test, thresholded_pred)
#####
##### y_test[y_test > 0] = 1
##### y_test[y_test < 0] = -1
#####
##### thresholded_pred[thresholded_pred > 0] = 1
##### thresholded_pred[thresholded_pred < 0] = -1
thresholded_pred = pred.argmax(axis=1)
y_test = y_test.argmax(axis=1)
evaluate(y_test, thresholded_pred)
thresholded_pred[thresholded_pred<=1] = -1
thresholded_pred[thresholded_pred==2] = 0
thresholded_pred[thresholded_pred>2] = 1
y_test[y_test<=1] = -1
y_test[y_test==2] = 0
y_test[y_test>2] = 1
evaluate(y_test, thresholded_pred)
pred = model.predict([X_train, X_train_w, one_hot_train], batch_size = 64)#, Y_train, batch_size=32, verbose=1, sample_weight=None)
print pred[0:5]
print y_train[0:5]
#pred[:,0] *= 1.5
print "This is the prediction"
#### y[y >= 0.1] = 1
#### y[y < 0.1] = 0
#### pred.shape = (pred.shape[0],)
#### print pred[0:20]
#### print "true labels"
#### print y_train[0:20]
##### print sum(sum(y == Y_train))
##### print (len(X_train) * len(X_train[0]))
#### print (sum(abs(y_train - pred))) / float(len(pred))
#### thresh1 = 1.5
#### thresh2 = 3.9
#### thresholded_pred = copy.deepcopy(pred)
#### thresholded_pred[(pred > (-thresh1 + 0.0)) & (pred < thresh2)] = 0
#### thresholded_pred[(pred >= thresh1) & (pred < thresh2)] = 3#1
#### thresholded_pred[pred >= thresh2] = 5#2
#### thresholded_pred[(pred > -thresh2) & (pred <= (-thresh1 + 0))] = -3#1
#### thresholded_pred[pred <= -thresh2] = -5#2
#### thresholded_pred = thresholded_pred.astype('int8')
#### print "Training"
#### evaluate(y_train, thresholded_pred)
#### y_train[y_train > 0] = 1
#### y_train[y_train < 0] = -1
####
#### thresholded_pred[thresholded_pred > 0] = 1
#### thresholded_pred[thresholded_pred < 0] = -1
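    # Repeat the same 5-class and collapsed 3-class evaluation on the training split.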
thresholded_pred = pred.argmax(axis=1)
y_train = y_train.argmax(axis=1)
evaluate(y_train, thresholded_pred)
thresholded_pred[thresholded_pred<=1] = -1
thresholded_pred[thresholded_pred==2] = 0
thresholded_pred[thresholded_pred>2] = 1
y_train[y_train<=1] = -1
y_train[y_train==2] = 0
y_train[y_train>2] = 1
evaluate(y_train, thresholded_pred)
# model_dup = duplicate_model('modelc_rnn5')
# layer_output = model_dup.predict([X_test, one_hot_test], batch_size = 64)
#
### get_last_layer_output = K.function([model.layers[0].input],
### [model.layers[2].output])
## get_last_layer_output = K.function([model.layers[0].input, K.learning_phase()],
## [model.layers[2].output])
### output in train mode = 0
### layer_output = np.array(get_last_layer_output([X_train[0:1200], 0])[0])
##
### output in train mode = 0
##
### X = [X_test, one_hot_test]
## print X_test.shape
## print one_hot_test.shape
## print len(X_test)
## print len(one_hot_test)
##
##
## X_2 = np.concatenate((X_test, one_hot_test), axis=2)
## start = 0
## increment = 100
## flag = 1
## print len(X_test)
## print "now!!"
## while start+increment <= len(X_test):
### X = [[X_test[start:start+increment], 1], [one_hot_test[start:start+increment], 1]]
## if flag:
## layer_output = get_last_layer_output([X_2[start:start+increment], 0])[0]#get_last_layer_output([[X_test[start:start+increment], 0], [one_hot_test[:, start:start+increment], 0]])[0]
## flag = 0
## else:
## layer_output = np.concatenate((layer_output, get_last_layer_output([X_2[start:start+increment], 0])[0]))
## start += increment
## if start != len(X_test):
### X = [X_test[start:start+increment], one_hot_test[start:start+increment]]
## layer_output = np.concatenate((layer_output, get_last_layer_output([X_2[start:start+increment], 0])[0]))
# print "length of hidden", len(layer_output[0])
# for iter in range(10):
# print df["tweet"][iter], layer_output[iter]
sentiment_classifier()
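# Note (illustrative sketch, not part of the original script): evaluate() is
# assumed to be defined elsewhere in this project and is not shown here. A
# minimal helper with the same calling convention could look like the
# following, using scikit-learn metrics; the exact metrics reported are an
# assumption.
#
#   from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
#
#   def evaluate(y_true, y_pred):
#       print "accuracy:", accuracy_score(y_true, y_pred)
#       print confusion_matrix(y_true, y_pred)
#       print classification_report(y_true, y_pred)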
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Testing File IO operations in file_io.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
class FileIoTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "base_dir")
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def testFileDoesntExist(self):
file_path = os.path.join(self._base_dir, "temp_file")
self.assertFalse(file_io.file_exists(file_path))
with self.assertRaises(errors.NotFoundError):
_ = file_io.read_file_to_string(file_path)
def testWriteToString(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFile(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testReadBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
def testWriteBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, "wb").write("testing")
with file_io.FileIO(file_path, mode="r") as f:
self.assertEqual("testing", f.read())
def testAppend(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("begin\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a1\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a2\n")
with file_io.FileIO(file_path, mode="r") as f:
file_contents = f.read()
self.assertEqual("begin\na1\na2\n", file_contents)
def testMultipleFiles(self):
file_prefix = os.path.join(self._base_dir, "temp_file")
for i in range(5000):
f = file_io.FileIO(file_prefix + str(i), mode="w+")
f.write("testing")
f.flush()
self.assertEqual("testing", f.read())
f.close()
def testMultipleWrites(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("line1\n")
f.write("line2")
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("line1\nline2", file_contents)
def testFileWriteBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="r").write("testing")
def testFileReadBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="w").read()
def testFileDelete(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_io.delete_file(file_path)
self.assertFalse(file_io.file_exists(file_path))
def testFileDeleteFail(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.NotFoundError):
file_io.delete_file(file_path)
def testGetMatchingFiles(self):
dir_path = os.path.join(self._base_dir, "temp_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [os.path.join(dir_path, name) for name in files]
self.assertItemsEqual(
file_io.get_matching_files(os.path.join(dir_path, "file*.txt")),
expected_match)
file_io.delete_recursively(dir_path)
self.assertFalse(file_io.file_exists(os.path.join(dir_path, "file3.txt")))
def testCreateRecursiveDir(self):
dir_path = os.path.join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
file_io.recursive_create_dir(dir_path)
file_io.recursive_create_dir(dir_path) # repeat creation
file_path = os.path.join(dir_path, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
file_io.delete_recursively(os.path.join(self._base_dir, "temp_dir"))
self.assertFalse(file_io.file_exists(file_path))
def testCopy(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.copy(file_path, copy_path)
self.assertTrue(file_io.file_exists(copy_path))
f = file_io.FileIO(file_path, mode="r")
self.assertEqual("testing", f.read())
self.assertEqual(7, f.tell())
def testCopyOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
file_io.copy(file_path, copy_path, overwrite=True)
self.assertTrue(file_io.file_exists(copy_path))
self.assertEqual("testing", file_io.FileIO(file_path, mode="r").read())
def testCopyOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
with self.assertRaises(errors.AlreadyExistsError):
file_io.copy(file_path, copy_path, overwrite=False)
def testRename(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.rename(file_path, rename_path)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
file_io.rename(file_path, rename_path, overwrite=True)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
with self.assertRaises(errors.AlreadyExistsError):
file_io.rename(file_path, rename_path, overwrite=False)
self.assertTrue(file_io.file_exists(rename_path))
self.assertTrue(file_io.file_exists(file_path))
def testDeleteRecursivelyFail(self):
fake_dir_path = os.path.join(self._base_dir, "temp_dir")
with self.assertRaises(errors.NotFoundError):
file_io.delete_recursively(fake_dir_path)
def testIsDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Failure for a non-existing dir.
self.assertFalse(file_io.is_directory(dir_path))
file_io.create_dir(dir_path)
self.assertTrue(file_io.is_directory(dir_path))
file_path = os.path.join(dir_path, "test_file")
file_io.FileIO(file_path, mode="w").write("test")
# False for a file.
self.assertFalse(file_io.is_directory(file_path))
# Test that the value returned from `stat()` has `is_directory` set.
file_statistics = file_io.stat(dir_path)
self.assertTrue(file_statistics.is_directory)
def testListDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
subdir_path = os.path.join(dir_path, "sub_dir")
file_io.create_dir(subdir_path)
subdir_file_path = os.path.join(subdir_path, "file4.txt")
file_io.FileIO(subdir_file_path, mode="w").write("testing")
dir_list = file_io.list_directory(dir_path)
self.assertItemsEqual(files + ["sub_dir"], dir_list)
def testListDirectoryFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
with self.assertRaises(errors.NotFoundError):
file_io.list_directory(dir_path)
def _setupWalkDirectories(self, dir_path):
# Creating a file structure as follows
# test_dir -> file: file1.txt; dirs: subdir1_1, subdir1_2, subdir1_3
    # subdir1_1 -> file: file2.txt
# subdir1_2 -> dir: subdir2
file_io.create_dir(dir_path)
file_io.FileIO(
os.path.join(dir_path, "file1.txt"), mode="w").write("testing")
sub_dirs1 = ["subdir1_1", "subdir1_2", "subdir1_3"]
for name in sub_dirs1:
file_io.create_dir(os.path.join(dir_path, name))
file_io.FileIO(
os.path.join(dir_path, "subdir1_1/file2.txt"),
mode="w").write("testing")
file_io.create_dir(os.path.join(dir_path, "subdir1_2/subdir2"))
def testWalkInOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = True)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=True):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [dir_path] + [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2", "subdir1_2/subdir2", "subdir1_3"]
])
self.assertEqual(dir_path, all_dirs[0])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")))
self.assertItemsEqual(all_subdirs[1:5], [[], ["subdir2"], [], []])
self.assertItemsEqual(all_subdirs[0],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file1.txt"], ["file2.txt"], [], [], []])
self.assertLess(
all_files.index(["file1.txt"]), all_files.index(["file2.txt"]))
def testWalkPostOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = False)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2/subdir2", "subdir1_2", "subdir1_3"]
] + [dir_path])
self.assertEqual(dir_path, all_dirs[4])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2")))
self.assertItemsEqual(all_subdirs[0:4], [[], [], ["subdir2"], []])
self.assertItemsEqual(all_subdirs[4],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file2.txt"], [], [], [], ["file1.txt"]])
self.assertLess(
all_files.index(["file2.txt"]), all_files.index(["file1.txt"]))
def testWalkFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Try walking a directory that wasn't created.
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [])
self.assertItemsEqual(all_subdirs, [])
self.assertItemsEqual(all_files, [])
def testStat(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_statistics = file_io.stat(file_path)
os_statistics = os.stat(file_path)
self.assertEqual(7, file_statistics.length)
self.assertEqual(
int(os_statistics.st_mtime), int(file_statistics.mtime_nsec / 1e9))
self.assertFalse(file_statistics.is_directory)
def testReadLine(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.readline())
self.assertEqual("testing2\n", f.readline())
self.assertEqual("testing3\n", f.readline())
self.assertEqual("\n", f.readline())
self.assertEqual("testing5", f.readline())
self.assertEqual("", f.readline())
def testRead(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.read(9))
self.assertEqual("testing2\n", f.read(9))
self.assertEqual("t", f.read(1))
self.assertEqual("esting3\n\ntesting5", f.read())
def testTell(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
self.assertEqual(27, f.tell())
self.assertEqual("\n", f.readline())
self.assertEqual(28, f.tell())
self.assertEqual("testing5", f.readline())
self.assertEqual(36, f.tell())
self.assertEqual("", f.readline())
self.assertEqual(36, f.tell())
def testSeek(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(18)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(0)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(-1)
with self.assertRaises(TypeError):
f.seek()
# TODO(jhseu): Delete after position deprecation.
with self.assertRaises(TypeError):
f.seek(offset=0, position=0)
f.seek(position=9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
def testSeekFromWhat(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(9, 1)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9, 0)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(-f.size(), 2)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(0, 3)
def testReadingIterator(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
with file_io.FileIO(file_path, mode="r+") as f:
f.write("".join(data))
actual_data = []
for line in f:
actual_data.append(line)
self.assertSequenceEqual(actual_data, data)
def testReadlines(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
f = file_io.FileIO(file_path, mode="r+")
f.write("".join(data))
f.flush()
lines = f.readlines()
self.assertSequenceEqual(lines, data)
def testEof(self):
"""Test that reading past EOF does not raise an exception."""
file_path = os.path.join(self._base_dir, "temp_file")
f = file_io.FileIO(file_path, mode="r+")
content = "testing"
f.write(content)
f.flush()
self.assertEqual(content, f.read(len(content) + 1))
if __name__ == "__main__":
test.main()
|
|
#
# init_lib.py
#
# functions for initialization
#
from aws_lib import SpinupError
import base64
from boto import vpc, ec2
from os import environ
from pprint import pprint
import re
import sys
import time
from yaml_lib import yaml_attr
def read_user_data( fn ):
"""
Given a filename, returns the file's contents in a string.
"""
r = ''
    with open( fn ) as fh:
        r = fh.read()
    return r
def get_tags( ec, r_id ):
"""
Takes EC2Connection object and resource ID. Returns tags associated
with that resource.
"""
return ec.get_all_tags(filters={ "resource-id": r_id })
def get_tag( ec, obj, tag ):
"""
    Get the tag object with the given name from the given resource object.
    Returns None if the tag is not set. Warning: EC2 tag names are case-sensitive.
"""
tags = get_tags( ec, obj.id )
found = 0
for t in tags:
if t.name == tag:
found = 1
break
if found:
return t
else:
return None
def update_tag( obj, tag, val ):
"""
Given an EC2 resource object, a tag and a value, updates the given tag
to val.
"""
for x in range(0, 5):
error = False
try:
obj.add_tag( tag, val )
except:
error = True
e = sys.exc_info()[0]
print "Huh, trying again ({})".format(e)
time.sleep(5)
if not error:
print "Object {} successfully tagged.".format(obj)
break
return None
def init_region( r ):
"""
Takes a region string. Connects to that region. Returns EC2Connection
and VPCConnection objects in a tuple.
"""
# connect to region
c = vpc.connect_to_region( r )
ec = ec2.connect_to_region( r )
return ( c, ec )
def init_vpc( c, cidr ):
"""
Takes VPCConnection object (which is actually a connection to a
particular region) and a CIDR block string. Looks for our VPC in that
region. Returns the boto.vpc.vpc.VPC object corresponding to our VPC.
See:
http://boto.readthedocs.org/en/latest/ref/vpc.html#boto.vpc.vpc.VPC
"""
# look for our VPC
all_vpcs = c.get_all_vpcs()
found = 0
our_vpc = None
for v in all_vpcs:
if v.cidr_block == cidr:
our_vpc = v
found = 1
break
if not found:
raise SpinupError( "VPC {} not found".format(cidr) )
return our_vpc
def init_subnet( c, vpc_id, cidr ):
"""
    Takes VPCConnection object (which is actually a connection to a
    region), a VPC ID, and a CIDR block string. Looks for our subnet in that region.
If subnet does not exist, creates it. Returns the subnet resource
object on success, raises exception on failure.
"""
# look for our VPC
all_subnets = c.get_all_subnets()
found = False
our_subnet = None
for s in all_subnets:
if s.cidr_block == cidr:
#print "Found subnet {}".format(cidr)
our_subnet = s
found = True
break
if not found:
our_subnet = c.create_subnet( vpc_id, cidr )
return our_subnet
def set_subnet_map_public_ip( ec, subnet_id ):
"""
    Takes EC2Connection object and SubnetId string. Attempts to set the
MapPublicIpOnLaunch attribute to True.
FIXME: give credit to source
"""
orig_api_version = ec.APIVersion
ec.APIVersion = '2014-06-15'
ec.get_status(
'ModifySubnetAttribute',
{'SubnetId': subnet_id, 'MapPublicIpOnLaunch.Value': 'true'},
verb='POST'
)
ec.APIVersion = orig_api_version
return None
def derive_ip_address( cidr_block, delegate, final8 ):
"""
Given a CIDR block string, a delegate number, and an integer
representing the final 8 bits of the IP address, construct and return
    the IP address derived from these values. For example, if cidr_block is
10.0.0.0/16, the delegate number is 10, and the final8 is 8, the
derived IP address will be 10.0.10.8.
"""
result = ''
match = re.match( r'\d+\.\d+', cidr_block )
if match:
result = '{}.{}.{}'.format( match.group(0), delegate, final8 )
else:
raise SpinupError( "{} passed to derive_ip_address() is not a CIDR block!".format(cidr_block) )
return result
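# Example (illustrative only):
#   derive_ip_address( '10.0.0.0/16', 10, 8 )    -> '10.0.10.8'
#   derive_ip_address( '192.168.0.0/16', 1, 99 ) -> '192.168.1.99'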
def get_master_instance( ec2_conn, subnet_id ):
"""
Given EC2Connection object and Master Subnet id, check that there is
just one instance running in that subnet - this is the Master. Raise
    exception if the number of instances is != 1.
Return the Master instance object.
"""
instances = ec2_conn.get_only_instances( filters={ "subnet-id": subnet_id } )
if 1 > len(instances):
raise SpinupError( "There are no instances in the master subnet" )
if 1 < len(instances):
raise SpinupError( "There are too many instances in the master subnet" )
return instances[0]
def template_token_subst( buf, key, val ):
"""
Given a string (buf), a key (e.g. '@@MASTER_IP@@') and val, replace all
occurrences of key in buf with val. Return the new string.
"""
targetre = re.compile( re.escape( key ) )
return re.sub( targetre, str(val), buf )
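# Example (illustrative only):
#   template_token_subst( 'master: @@MASTER_IP@@', '@@MASTER_IP@@', '10.0.10.8' )
#   -> 'master: 10.0.10.8'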
def process_user_data( fn, vars = [] ):
"""
Given filename of user-data file and a list of environment
variable names, replaces @@...@@ tokens with the values of the
    environment variables. Returns the user-data string on success and
    raises an exception on failure.
"""
# Get user_data string.
buf = read_user_data( fn )
for e in vars:
if not e in environ:
raise SpinupError( "Missing environment variable {}!".format( e ) )
buf = template_token_subst( buf, '@@'+e+'@@', environ[e] )
return buf
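# Example (illustrative only; assumes DELEGATE is set in the environment):
#   environ['DELEGATE'] = '7'
#   process_user_data( 'user-data.txt', [ 'DELEGATE' ] )
#   -> contents of user-data.txt with every @@DELEGATE@@ token replaced by '7'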
def count_instances_in_subnet( ec, subnet_id ):
"""
Given EC2Connection object and subnet ID, count number of instances
in that subnet and return it.
"""
instance_list = ec.get_only_instances(
filters={ "subnet-id": subnet_id }
)
return len(instance_list)
def make_reservation( ec, ami_id, **kwargs ):
"""
    Given EC2Connection object and AMI ID, as well as
    all the kwargs referred to below, make a reservation for an instance
    and return the reservation object.
"""
# extract arguments to be passed to ec.run_instances()
our_kwargs = {
"key_name": kwargs['key_name'],
"subnet_id": kwargs['subnet_id'],
"instance_type": kwargs['instance_type'],
"private_ip_address": kwargs['private_ip_address']
}
# Master or minion?
if kwargs['master']:
our_kwargs['user_data'] = kwargs['user_data']
else:
# perform token substitution in user-data string
u = kwargs['user_data']
u = template_token_subst( u, '@@MASTER_IP@@', kwargs['master_ip'] )
u = template_token_subst( u, '@@DELEGATE@@', kwargs['delegate_no'] )
u = template_token_subst( u, '@@ROLE@@', kwargs['role'] )
u = template_token_subst( u, '@@NODE_NO@@', kwargs['node_no'] )
our_kwargs['user_data'] = u
# Make the reservation.
reservation = ec.run_instances( ami_id, **our_kwargs )
# Return the reservation object.
return reservation
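# Example (illustrative only; all values are placeholders):
#   reservation = make_reservation(
#       ec, 'ami-12345678',
#       master=True,
#       key_name='my-keypair',
#       subnet_id=subnet.id,
#       instance_type='t2.micro',
#       private_ip_address='10.0.10.8',
#       user_data=user_data,
#   )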
def wait_for_running( ec2_conn, instance_id ):
"""
Given an instance id, wait for its state to change to "running".
"""
print "Waiting for {} running state".format( instance_id )
while True:
instances = ec2_conn.get_only_instances( instance_ids=[ instance_id ] )
print "Current state is {}".format( instances[0].state )
if instances[0].state != 'running':
print "Sleeping for 5 seconds"
time.sleep(5)
else:
print "Waiting another 5 seconds for good measure"
time.sleep(5)
break
def wait_for_available( ec2_conn, volume_id ):
"""
Given a volume id, wait for its state to change to "available".
"""
print "Waiting for {} available state".format( volume_id )
while True:
volumes = ec2_conn.get_all_volumes( volume_ids=[ volume_id ] )
print "Current status is {}".format( volumes[0].status )
if volumes[0].status != 'available':
print "Sleeping for 5 seconds"
time.sleep(5)
else:
break
def wait_for_detachment( ec2_conn, v_id, i_id ):
"""
Given a volume ID and an instance ID, wait for volume to
become detached.
"""
print "Waiting for volume {} to be detached from instnace {}".format(v_id, i_id)
while True:
attached_vol = ec2_conn.get_all_volumes(
filters={
"volume-id": v_id,
"attachment.instance-id": i_id,
"attachment.device": "/dev/sdb"
}
)
print "attached_vol == {}".format(attached_vol)
if attached_vol is None or len(attached_vol) == 0:
print "Detached!"
break
else:
time.sleep(5)
print "Still attached."
|
|
from ctypes import *
import os
import sys
from .util import *
from .mjtypes import *
osp = os.path
if sys.platform.startswith("darwin"):
libfile = osp.abspath(osp.join(osp.dirname(__file__),"../../vendor/mujoco/libmujoco131.dylib"))
elif sys.platform.startswith("linux"):
libfile = osp.abspath(osp.join(osp.dirname(__file__),"../../vendor/mujoco/libmujoco131.so"))
elif sys.platform.startswith("win"):
libfile = osp.abspath(osp.join(osp.dirname(__file__),"../../vendor/mujoco/mujoco.lib"))
else:
raise RuntimeError("unrecognized platform %s"%sys.platform)
mjlib = cdll.LoadLibrary(libfile)
mjlib.mj_loadXML.argtypes = [String, String, c_char_p, c_int]
mjlib.mj_loadXML.restype = POINTER(MJMODEL)
mjlib.mj_saveXML.argtypes = [String, POINTER(MJMODEL), String]
mjlib.mj_saveXML.restype = c_int
#mjlib.mj_printSchema.argtypes = [String, String, c_int, c_int, c_int]
#mjlib.mj_printSchema.restype = c_int
mjlib.mj_activate.argtypes = [String]
mjlib.mj_activate.restype = c_int
mjlib.mj_step.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step.restype = None
#mjlib.mj_step1.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_step1.restype = None
#mjlib.mj_step2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_step2.restype = None
mjlib.mj_forward.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_forward.restype = None
mjlib.mj_inverse.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_inverse.restype = None
#mjlib.mj_forwardSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_forwardSkip.restype = None
#mjlib.mj_inverseSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_inverseSkip.restype = None
#mjlib.mj_sensor.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_sensor.restype = None
#mjlib.mj_energy.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_energy.restype = None
#mjlib.mj_defaultSolRefImp.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mj_defaultSolRefImp.restype = None
#mjlib.mj_defaultOption.argtypes = [POINTER(mjOption)]
#mjlib.mj_defaultOption.restype = None
#mjlib.mj_defaultVisual.argtypes = [POINTER(mjVisual)]
#mjlib.mj_defaultVisual.restype = None
#mjlib.mj_copyModel.argtypes = [POINTER(MJMODEL), POINTER(MJMODEL)]
#mjlib.mj_copyModel.restype = POINTER(MJMODEL)
#mjlib.mj_saveModel.argtypes = [POINTER(MJMODEL), String, c_int, POINTER(None)]
#mjlib.mj_saveModel.restype = None
#mjlib.mj_loadModel.argtypes = [String, c_int, POINTER(None)]
#mjlib.mj_loadModel.restype = POINTER(MJMODEL)
mjlib.mj_deleteModel.argtypes = [POINTER(MJMODEL)]
mjlib.mj_deleteModel.restype = None
#mjlib.mj_sizeModel.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_sizeModel.restype = c_int
mjlib.mj_makeData.argtypes = [POINTER(MJMODEL)]
mjlib.mj_makeData.restype = POINTER(MJDATA)
#mjlib.mj_copyData.argtypes = [POINTER(MJDATA), POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_copyData.restype = POINTER(MJDATA)
#mjlib.mj_resetData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_resetData.restype = None
#mjlib.mj_stackAlloc.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_stackAlloc.restype = POINTER(c_double)
mjlib.mj_deleteData.argtypes = [POINTER(MJDATA)]
mjlib.mj_deleteData.restype = None
#mjlib.mj_resetCallbacks.argtypes = []
#mjlib.mj_resetCallbacks.restype = None
#mjlib.mj_setConst.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_setConst.restype = None
#mjlib.mj_printModel.argtypes = [POINTER(MJMODEL), String]
#mjlib.mj_printModel.restype = None
#mjlib.mj_printData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), String]
#mjlib.mj_printData.restype = None
#mjlib.mju_printMat.argtypes = [POINTER(c_double), c_int, c_int]
#mjlib.mju_printMat.restype = None
#mjlib.mj_fwdPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdPosition.restype = None
#mjlib.mj_fwdVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdVelocity.restype = None
#mjlib.mj_fwdActuation.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdActuation.restype = None
#mjlib.mj_fwdAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdAcceleration.restype = None
#mjlib.mj_fwdConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdConstraint.restype = None
#mjlib.mj_Euler.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_Euler.restype = None
#mjlib.mj_RungeKutta.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_RungeKutta.restype = None
#mjlib.mj_invPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invPosition.restype = None
#mjlib.mj_invVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invVelocity.restype = None
#mjlib.mj_invConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invConstraint.restype = None
#mjlib.mj_compareFwdInv.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_compareFwdInv.restype = None
#mjlib.mj_checkPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkPos.restype = None
#mjlib.mj_checkVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkVel.restype = None
#mjlib.mj_checkAcc.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkAcc.restype = None
#mjlib.mj_kinematics.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_kinematics.restype = None
#mjlib.mj_comPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comPos.restype = None
#mjlib.mj_tendon.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_tendon.restype = None
#mjlib.mj_transmission.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_transmission.restype = None
#mjlib.mj_crb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_crb.restype = None
#mjlib.mj_factorM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_factorM.restype = None
#mjlib.mj_backsubM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM.restype = None
#mjlib.mj_backsubM2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM2.restype = None
#mjlib.mj_comVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comVel.restype = None
#mjlib.mj_passive.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_passive.restype = None
#mjlib.mj_rne.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_rne.restype = None
#mjlib.mj_rnePostConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_rnePostConstraint.restype = None
#mjlib.mj_collision.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_collision.restype = None
#mjlib.mj_makeConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_makeConstraint.restype = None
#mjlib.mj_projectConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_projectConstraint.restype = None
#mjlib.mj_referenceConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_referenceConstraint.restype = None
#mjlib.mj_isPyramid.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isPyramid.restype = c_int
#mjlib.mj_isSparse.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isSparse.restype = c_int
#mjlib.mj_mulJacVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacVec.restype = None
#mjlib.mj_mulJacTVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacTVec.restype = None
#mjlib.mj_jac.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jac.restype = None
#mjlib.mj_jacBody.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBody.restype = None
#mjlib.mj_jacBodyCom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBodyCom.restype = None
#mjlib.mj_jacGeom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacGeom.restype = None
#mjlib.mj_jacSite.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacSite.restype = None
#mjlib.mj_jacPointAxis.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacPointAxis.restype = None
#mjlib.mj_name2id.argtypes = [POINTER(MJMODEL), mjtObj, String]
#mjlib.mj_name2id.restype = c_int
#mjlib.mj_id2name.argtypes = [POINTER(MJMODEL), mjtObj, c_int]
#mjlib. mj_id2name.restype = ReturnString
#mjlib.else:
#mjlib. mj_id2name.restype = String
#mjlib. mj_id2name.errcheck = ReturnString
#mjlib.mj_fullM.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_fullM.restype = None
#mjlib.mj_mulM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulM.restype = None
#mjlib.mj_applyFT.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, POINTER(c_double)]
#mjlib.mj_applyFT.restype = None
mjlib.mj_objectVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), c_ubyte]
mjlib.mj_objectVelocity.restype = None
#mjlib.mj_objectAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectAcceleration.restype = None
#mjlib.mj_contactForce.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_contactForce.restype = None
#mjlib.mj_integratePos.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mj_integratePos.restype = None
#mjlib.mj_normalizeQuat.argtypes = [POINTER(MJMODEL), POINTER(c_double)]
#mjlib.mj_normalizeQuat.restype = None
#mjlib.mj_local2Global.argtypes = [POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_local2Global.restype = None
#mjlib.mj_getTotalmass.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_getTotalmass.restype = c_double
#mjlib.mj_setTotalmass.argtypes = [POINTER(MJMODEL), c_double]
#mjlib.mj_setTotalmass.restype = None
#mjlib.mj_version.argtypes = []
#mjlib.mj_version.restype = c_double
mjlib.mjv_makeObjects.argtypes = [POINTER(MJVOBJECTS), c_int]
mjlib.mjv_makeObjects.restype = None
mjlib.mjv_freeObjects.argtypes = [POINTER(MJVOBJECTS)]
mjlib.mjv_freeObjects.restype = None
mjlib.mjv_defaultOption.argtypes = [POINTER(MJVOPTION)]
mjlib.mjv_defaultOption.restype = None
#mjlib.mjv_defaultCameraPose.argtypes = [POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_defaultCameraPose.restype = None
mjlib.mjv_defaultCamera.argtypes = [POINTER(MJVCAMERA)]
mjlib.mjv_defaultCamera.restype = None
mjlib.mjv_setCamera.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVCAMERA)]
mjlib.mjv_setCamera.restype = None
mjlib.mjv_updateCameraPose.argtypes = [POINTER(MJVCAMERA), c_double]
mjlib.mjv_updateCameraPose.restype = None
#mjlib.mjv_convert3D.argtypes = [POINTER(c_double), POINTER(c_double), c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert3D.restype = None
#mjlib.mjv_convert2D.argtypes = [POINTER(c_double), mjtMouse, c_double, c_double, c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert2D.restype = None
mjlib.mjv_moveCamera.argtypes = [c_int, c_float, c_float, POINTER(MJVCAMERA), c_float, c_float]
mjlib.mjv_moveCamera.restype = None
#mjlib.mjv_moveObject.argtypes = [mjtMouse, c_float, c_float, POINTER(MJVCAMERAPOSE), c_float, c_float, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_moveObject.restype = None
#mjlib.mjv_mousePerturb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_mousePerturb.restype = None
#mjlib.mjv_mouseEdit.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_mouseEdit.restype = None
mjlib.mjv_makeGeoms.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS), POINTER(MJVOPTION), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_makeGeoms.restype = None
mjlib.mjv_makeLights.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS)]
mjlib.mjv_makeLights.restype = None
mjlib.mjr_overlay.argtypes = [MJRRECT, c_int, c_int, String, String, POINTER(MJRCONTEXT)]
mjlib.mjr_overlay.restype = None
#mjlib.mjr_rectangle.argtypes = [c_int, MJRRECT, c_double, c_double, c_double, c_double, c_double, c_double, c_double, c_double]
#mjlib.mjr_rectangle.restype = None
#mjlib.mjr_finish.argtypes = []
#mjlib.mjr_finish.restype = None
#mjlib.mjr_text.argtypes = [String, POINTER(MJRCONTEXT), c_int, c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_text.restype = None
#mjlib.mjr_textback.argtypes = [String, POINTER(MJRCONTEXT), c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_textback.restype = None
#mjlib.mjr_textWidth.argtypes = [String, POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_textWidth.restype = c_int
mjlib.mjr_defaultOption.argtypes = [POINTER(MJROPTION)]
mjlib.mjr_defaultOption.restype = None
mjlib.mjr_defaultContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_defaultContext.restype = None
#mjlib.mjr_uploadTexture.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_uploadTexture.restype = None
mjlib.mjr_makeContext.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
mjlib.mjr_makeContext.restype = None
mjlib.mjr_freeContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_freeContext.restype = None
mjlib.mjr_render.argtypes = [c_int, MJRRECT, POINTER(MJVOBJECTS), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
mjlib.mjr_render.restype = None
#mjlib.mjr_select.argtypes = [MJRRECT, POINTER(MJVOBJECTS), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
#mjlib.mjr_select.restype = c_int
#mjlib.mjr_showOffscreen.argtypes = [c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showOffscreen.restype = None
#mjlib.mjr_showBuffer.argtypes = [POINTER(c_ubyte), c_int, c_int, c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showBuffer.restype = None
#mjlib.mjr_getOffscreen.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getOffscreen.restype = None
#mjlib.mjr_getBackbuffer.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getBackbuffer.restype = None
#mjlib.
#mjlib.
#mjlib.mju_error.argtypes = [String]
#mjlib.mju_error.restype = None
#mjlib.mju_error_i.argtypes = [String, c_int]
#mjlib.mju_error_i.restype = None
#mjlib.mju_error_s.argtypes = [String, String]
#mjlib.mju_error_s.restype = None
#mjlib.mju_warning.argtypes = [String]
#mjlib.mju_warning.restype = None
#mjlib.mju_warning_i.argtypes = [String, c_int]
#mjlib.mju_warning_i.restype = None
#mjlib.mju_warning_s.argtypes = [String, String]
#mjlib.mju_warning_s.restype = None
#mjlib.mju_clearHandlers.argtypes = []
#mjlib.mju_clearHandlers.restype = None
#mjlib.mju_malloc.argtypes = [c_size_t]
#mjlib.mju_malloc.restype = POINTER(None)
#mjlib.mju_free.argtypes = [POINTER(None)]
#mjlib.mju_free.restype = None
#mjlib.mj_warning.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_warning.restype = None
#mjlib.mju_zero3.argtypes = [POINTER(c_double)]
#mjlib.mju_zero3.restype = None
#mjlib.mju_copy3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_copy3.restype = None
#mjlib.mju_scl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_scl3.restype = None
#mjlib.mju_add3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_add3.restype = None
#mjlib.mju_sub3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_sub3.restype = None
#mjlib.mju_addTo3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_addTo3.restype = None
#mjlib.mju_addToScl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addToScl3.restype = None
#mjlib.mju_addScl3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addScl3.restype = None
#mjlib.mju_normalize3.argtypes = [POINTER(c_double)]
#mjlib.mju_normalize3.restype = c_double
#mjlib.mju_norm3.argtypes = [POINTER(c_double)]
#mjlib.mju_norm3.restype = c_double
#mjlib.mju_dot3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dot3.restype = c_double
#mjlib.mju_dist3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dist3.restype = c_double
#mjlib.mju_rotVecMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMat.restype = None
#mjlib.mju_rotVecMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMatT.restype = None
#mjlib.mju_cross.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_cross.restype = None
#mjlib.mju_zero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_zero.restype = None
#mjlib.mju_copy.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_copy.restype = None
#mjlib.mju_scl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_scl.restype = None
#mjlib.mju_add.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_add.restype = None
#mjlib.mju_sub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_sub.restype = None
#mjlib.mju_addTo.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_addTo.restype = None
#mjlib.mju_addToScl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addToScl.restype = None
#mjlib.mju_addScl.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addScl.restype = None
#mjlib.mju_normalize.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_normalize.restype = c_double
#mjlib.mju_norm.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_norm.restype = c_double
#mjlib.mju_dot.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_dot.restype = c_double
#mjlib.mju_mulMatVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatVec.restype = None
#mjlib.mju_mulMatTVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatTVec.restype = None
#mjlib.mju_transpose.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_transpose.restype = None
#mjlib.mju_mulMatMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMat.restype = None
#mjlib.mju_mulMatMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMatT.restype = None
#mjlib.mju_sqrMat.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int, POINTER(c_double), c_int]
#mjlib.mju_sqrMat.restype = None
#mjlib.mju_mulMatTMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatTMat.restype = None
#mjlib.mju_transformSpatial.argtypes = [POINTER(c_double), POINTER(c_double), mjtByte, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_transformSpatial.restype = None
#mjlib.mju_rotVecQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecQuat.restype = None
#mjlib.mju_negQuat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_negQuat.restype = None
#mjlib.mju_mulQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuat.restype = None
#mjlib.mju_mulQuatAxis.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuatAxis.restype = None
#mjlib.mju_axisAngle2Quat.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_axisAngle2Quat.restype = None
#mjlib.mju_quat2Vel.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quat2Vel.restype = None
#mjlib.mju_quat2Mat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quat2Mat.restype = None
#mjlib.mju_mat2Quat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mat2Quat.restype = None
#mjlib.mju_derivQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_derivQuat.restype = None
#mjlib.mju_quatIntegrate.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quatIntegrate.restype = None
#mjlib.mju_quatZ2Vec.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quatZ2Vec.restype = None
#mjlib.mju_cholFactor.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_double, c_double, POINTER(c_double)]
#mjlib.mju_cholFactor.restype = c_int
#mjlib.mju_cholBacksub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_cholBacksub.restype = None
#mjlib.mju_eig3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_eig3.restype = c_int
#mjlib.mju_muscleFVL.argtypes = [c_double, c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_muscleFVL.restype = c_double
#mjlib.mju_musclePassive.argtypes = [c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_musclePassive.restype = c_double
#mjlib.mju_pneumatic.argtypes = [c_double, c_double, c_double, POINTER(c_double), c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_pneumatic.restype = c_double
#mjlib.mju_encodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_encodePyramid.restype = None
#mjlib.mju_decodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_decodePyramid.restype = None
#mjlib.mju_springDamper.argtypes = [c_double, c_double, c_double, c_double, c_double]
#mjlib.mju_springDamper.restype = c_double
#mjlib.mju_min.argtypes = [c_double, c_double]
#mjlib.mju_min.restype = c_double
#mjlib.mju_max.argtypes = [c_double, c_double]
#mjlib.mju_max.restype = c_double
#mjlib.mju_sign.argtypes = [c_double]
#mjlib.mju_sign.restype = c_double
#mjlib.mju_round.argtypes = [c_double]
#mjlib.mju_round.restype = c_int
#mjlib.mju_type2Str.argtypes = [c_int]
#mjlib. mju_type2Str.restype = ReturnString
#mjlib.else:
#mjlib. mju_type2Str.restype = String
#mjlib. mju_type2Str.errcheck = ReturnString
#mjlib.mju_str2Type.argtypes = [String]
#mjlib.mju_str2Type.restype = mjtObj
#mjlib.mju_warningText.argtypes = [c_int]
#mjlib. mju_warningText.restype = ReturnString
#mjlib.else:
#mjlib. mju_warningText.restype = String
#mjlib. mju_warningText.errcheck = ReturnString
#mjlib.mju_isBad.argtypes = [c_double]
#mjlib.mju_isBad.restype = c_int
#mjlib.mju_isZero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_isZero.restype = c_int
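# Illustrative sketch (not part of the generated bindings): a typical call
# sequence for the functions bound above. The file paths and the error-buffer
# size are placeholders, and the mj_loadXML argument order simply follows the
# argtypes declared above.
#
#   from ctypes import create_string_buffer
#   errbuf = create_string_buffer(1000)
#   mjlib.mj_activate("mjkey.txt")
#   model = mjlib.mj_loadXML("model.xml", None, errbuf, 1000)
#   data = mjlib.mj_makeData(model)
#   for _ in range(100):
#       mjlib.mj_step(model, data)
#   mjlib.mj_deleteData(data)
#   mjlib.mj_deleteModel(model)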
|
|
from __future__ import unicode_literals
from django.db.models.fields import NOT_PROVIDED
from django.utils import six
from .base import Operation
class AddField(Operation):
"""
Adds a field to a model.
"""
def __init__(self, model_name, name, field, preserve_default=True):
self.model_name = model_name
self.name = name
self.field = field
self.preserve_default = preserve_default
def state_forwards(self, app_label, state):
# If preserve default is off, don't use the default for future state
if not self.preserve_default:
field = self.field.clone()
field.default = NOT_PROVIDED
else:
field = self.field
state.models[app_label, self.model_name.lower()].fields.append((self.name, field))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.render().get_model(app_label, self.model_name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
from_model = from_state.render().get_model(app_label, self.model_name)
field = to_model._meta.get_field_by_name(self.name)[0]
if not self.preserve_default:
field.default = self.field.default
schema_editor.add_field(
from_model,
field,
)
if not self.preserve_default:
field.default = NOT_PROVIDED
def database_backwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
if self.allowed_to_migrate(schema_editor.connection.alias, from_model):
schema_editor.remove_field(from_model, from_model._meta.get_field_by_name(self.name)[0])
def describe(self):
return "Add field %s to %s" % (self.name, self.model_name)
def __eq__(self, other):
return (
(self.__class__ == other.__class__) and
(self.name == other.name) and
(self.model_name.lower() == other.model_name.lower()) and
(self.field.deconstruct()[1:] == other.field.deconstruct()[1:])
)
def references_model(self, name, app_label=None):
return name.lower() == self.model_name.lower()
def references_field(self, model_name, name, app_label=None):
return self.references_model(model_name) and name.lower() == self.name.lower()
class RemoveField(Operation):
"""
Removes a field from a model.
"""
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def state_forwards(self, app_label, state):
new_fields = []
for name, instance in state.models[app_label, self.model_name.lower()].fields:
if name != self.name:
new_fields.append((name, instance))
state.models[app_label, self.model_name.lower()].fields = new_fields
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
if self.allowed_to_migrate(schema_editor.connection.alias, from_model):
schema_editor.remove_field(from_model, from_model._meta.get_field_by_name(self.name)[0])
def database_backwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.render().get_model(app_label, self.model_name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
from_model = from_state.render().get_model(app_label, self.model_name)
schema_editor.add_field(from_model, to_model._meta.get_field_by_name(self.name)[0])
def describe(self):
return "Remove field %s from %s" % (self.name, self.model_name)
def references_model(self, name, app_label=None):
return name.lower() == self.model_name.lower()
def references_field(self, model_name, name, app_label=None):
return self.references_model(model_name) and name.lower() == self.name.lower()
class AlterField(Operation):
"""
Alters a field's database column (e.g. null, max_length) to the provided new field
"""
def __init__(self, model_name, name, field, preserve_default=True):
self.model_name = model_name
self.name = name
self.field = field
self.preserve_default = preserve_default
def state_forwards(self, app_label, state):
if not self.preserve_default:
field = self.field.clone()
field.default = NOT_PROVIDED
else:
field = self.field
state.models[app_label, self.model_name.lower()].fields = [
(n, field if n == self.name else f) for n, f in state.models[app_label, self.model_name.lower()].fields
]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.render().get_model(app_label, self.model_name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
from_model = from_state.render().get_model(app_label, self.model_name)
from_field = from_model._meta.get_field_by_name(self.name)[0]
to_field = to_model._meta.get_field_by_name(self.name)[0]
# If the field is a relatedfield with an unresolved rel.to, just
# set it equal to the other field side. Bandaid fix for AlterField
# migrations that are part of a RenameModel change.
if from_field.rel and from_field.rel.to:
if isinstance(from_field.rel.to, six.string_types):
from_field.rel.to = to_field.rel.to
elif to_field.rel and isinstance(to_field.rel.to, six.string_types):
to_field.rel.to = from_field.rel.to
if not self.preserve_default:
to_field.default = self.field.default
schema_editor.alter_field(from_model, from_field, to_field)
if not self.preserve_default:
to_field.default = NOT_PROVIDED
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Alter field %s on %s" % (self.name, self.model_name)
def __eq__(self, other):
return (
(self.__class__ == other.__class__) and
(self.name == other.name) and
(self.model_name.lower() == other.model_name.lower()) and
(self.field.deconstruct()[1:] == other.field.deconstruct()[1:])
)
def references_model(self, name, app_label=None):
return name.lower() == self.model_name.lower()
def references_field(self, model_name, name, app_label=None):
return self.references_model(model_name) and name.lower() == self.name.lower()
class RenameField(Operation):
"""
Renames a field on the model. Might affect db_column too.
"""
def __init__(self, model_name, old_name, new_name):
self.model_name = model_name
self.old_name = old_name
self.new_name = new_name
def state_forwards(self, app_label, state):
# Rename the field
state.models[app_label, self.model_name.lower()].fields = [
(self.new_name if n == self.old_name else n, f)
for n, f in state.models[app_label, self.model_name.lower()].fields
]
# Fix index/unique_together to refer to the new field
options = state.models[app_label, self.model_name.lower()].options
for option in ('index_together', 'unique_together'):
if option in options:
options[option] = [
[self.new_name if n == self.old_name else n for n in together]
for together in options[option]
]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.render().get_model(app_label, self.model_name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
from_model = from_state.render().get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field_by_name(self.old_name)[0],
to_model._meta.get_field_by_name(self.new_name)[0],
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.render().get_model(app_label, self.model_name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
from_model = from_state.render().get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field_by_name(self.new_name)[0],
to_model._meta.get_field_by_name(self.old_name)[0],
)
def describe(self):
return "Rename field %s on %s to %s" % (self.old_name, self.model_name, self.new_name)
def references_model(self, name, app_label=None):
return name.lower() == self.model_name.lower()
def references_field(self, model_name, name, app_label=None):
return self.references_model(model_name) and (
name.lower() == self.old_name.lower() or
name.lower() == self.new_name.lower()
)
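# Hedged usage sketch (not part of this module): operations like AlterField and
# RenameField are normally emitted by makemigrations into an app's migration
# file; the app label and field names below are hypothetical.
#
#     class Migration(migrations.Migration):
#         dependencies = [("library", "0001_initial")]
#         operations = [
#             migrations.RenameField(
#                 model_name="book",
#                 old_name="name",
#                 new_name="title",
#             ),
#         ]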
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, wradlib Development Team. All Rights Reserved.
# Distributed under the MIT License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
#!/usr/bin/env python
import numpy as np
from PyQt4 import QtGui, QtCore
from vispy.scene import SceneCanvas
from vispy.util.event import EventEmitter
from vispy.visuals.transforms import STTransform, MatrixTransform, PolarTransform
from vispy.scene.cameras import PanZoomCamera
from vispy.scene.visuals import Image, ColorBar, Markers, Text
from vispy.geometry import Rect
from wradvis import utils
from wradvis.config import conf
class ColorbarCanvas(SceneCanvas):
def __init__(self, **kwargs):
super(ColorbarCanvas, self).__init__(keys='interactive', **kwargs)
        # set size of Canvas
self.size = 60, 450
# unfreeze needed to add more elements
self.unfreeze()
# add grid central widget
self.grid = self.central_widget.add_grid()
# add view to grid
self.view = self.grid.add_view(row=0, col=0)
self.view.border_color = (0.5, 0.5, 0.5, 1)
# initialize colormap, we take cubehelix for now
        # this is the nicest colormap for radar in vispy
cmap = 'cubehelix'
# initialize ColorBar Visual, add to view
self.cbar = ColorBar(center_pos=(0, 10),
size=np.array([400, 20]),
cmap=cmap,
clim=(conf["vis"]["cmin"], conf["vis"]["cmax"]),
label_str='measurement units',
orientation='right',
border_width=1,
border_color='white',
parent=self.view.scene)
# add transform to Colorbar
self.cbar.transform = STTransform(scale=(1, 1, 1),
translate=(20, 225, 0.5))
# whiten label and ticks
self.cbar.label.color = 'white'
for tick in self.cbar.ticks:
tick.color = 'white'
self.freeze()
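# Hedged usage sketch (assumption): the colorbar limits can be changed later by
# assigning to the ColorBar visual directly, which is what RadolanWidget.set_clim
# does further below; the values here are hypothetical.
#
#     cbar_canvas = ColorbarCanvas()
#     cbar_canvas.cbar.clim = (0, 100)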
class RadolanCanvas(SceneCanvas):
def __init__(self, **kwargs):
super(RadolanCanvas, self).__init__(keys='interactive', **kwargs)
        # set size of Canvas
self.size = 450, 450
# unfreeze needed to add more elements
self.unfreeze()
# add grid central widget
self.grid = self.central_widget.add_grid()
# add view to grid
self.view = self.grid.add_view(row=0, col=0)
self.view.border_color = (0.5, 0.5, 0.5, 1)
# add signal emitters
self.mouse_moved = EventEmitter(source=self, type="mouse_moved")
self.key_pressed = EventEmitter(source=self, type="key_pressed")
# block double clicks
self.events.mouse_double_click.block()
# initialize empty RADOLAN image
img_data = np.zeros((900, 900))
# initialize colormap, we take cubehelix for now
        # this is the nicest colormap for radar in vispy
cmap = 'cubehelix'
self.images = []
# initialize Image Visual with img_data
# add to view
self.image = Image(img_data,
method='subdivide',
#interpolation='bicubic',
cmap=cmap,
clim=(0,50),
parent=self.view.scene)
self.images.append(self.image)
# add transform to Image
# (mostly positioning within canvas)
self.image.transform = STTransform(translate=(0, 0, 0))
        # get RADOLAN lower-left (ll) point coordinate into self.r0
self.r0 = utils.get_radolan_origin()
        # create cities (Markers and Text Visuals)
self.create_cities()
# create PanZoomCamera
self.cam = PanZoomCamera(name="PanZoom",
rect=Rect(0, 0, 900, 900),
aspect=1,
parent=self.view.scene)
self.view.camera = self.cam
self._mouse_position = None
self.freeze()
# print FPS to console, vispy SceneCanvas internal function
self.measure_fps()
def create_marker(self, id, pos, name):
marker = Markers(parent=self.view.scene)
marker.transform = STTransform(translate=(0, 0, -10))
marker.interactive = True
# add id
marker.unfreeze()
marker.id = id
marker.freeze()
marker.set_data(pos=pos[np.newaxis],
symbol="disc",
edge_color="blue",
face_color='red',
size=10)
# initialize Markertext
text = Text(text=name,
pos=pos,
font_size=15,
anchor_x='right',
anchor_y='top',
parent=self.view.scene)
return marker, text
def create_cities(self):
self.selected = None
cities = utils.get_cities_coords()
cnameList = []
ccoordList = []
for k, v in cities.items():
cnameList.append(k)
ccoordList.append(v)
ccoord = np.vstack(ccoordList)
ccoord = utils.wgs84_to_radolan(ccoord)
pos_scene = np.zeros((ccoord.shape[0], 2), dtype=np.float32)
pos_scene[:] = ccoord - self.r0
# initialize Markers
self.markers = []
self.text = []
i = 0
for p, n in zip(pos_scene, cnameList):
m, t = self.create_marker(i, p, n)
self.markers.append(m)
self.text.append(t)
i += 1
def on_mouse_move(self, event):
point = self.scene.node_transform(self.image).map(event.pos)[:2]
self._mouse_position = point
# emit signal
self.mouse_moved(event)
def on_mouse_press(self, event):
self.view.interactive = False
for v in self.visuals_at(event.pos, radius=30):
if isinstance(v, Markers):
if self.selected is None:
self.selected = v
self.selected.symbol = 'star'
else:
self.selected.symbol = 'disc'
if self.selected.id == v.id:
self.selected = None
else:
self.selected = v
self.selected.symbol = 'star'
self.view.interactive = True
def on_key_press(self, event):
self.key_pressed(event)
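# Hedged usage sketch (assumption): mouse_moved and key_pressed are plain vispy
# EventEmitters, so a parent widget can subscribe via connect(); the handler
# name below is hypothetical.
#
#     canvas = RadolanCanvas()
#
#     def on_canvas_mouse_moved(event):
#         print(canvas._mouse_position)
#
#     canvas.mouse_moved.connect(on_canvas_mouse_moved)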
class PTransform(PolarTransform):
glsl_imap = """
vec4 polar_transform_map(vec4 pos) {
float theta = atan(radians(pos.x), radians(pos.y));
theta = degrees(theta + 3.14159265358979323846);
float r = length(pos.xy);
return vec4(r, theta, pos.z, 1);
}
"""
class PolarImage(Image):
def __init__(self, source=None, **kwargs):
super(PolarImage, self).__init__(**kwargs)
self.unfreeze()
# source should be an object, which contains information about
# a specific radar source
self.source = source
# source should contain the radar coordinates in some usable format
# here I assume offset from lower left (0,0)
if source is not None:
xoff = source['X']
yoff = source['Y']
else:
xoff = 0
yoff = 0
        # this takes the image shape and uses it for the transformation
self.theta = self._data.shape[0]
self.range = self._data.shape[1]
# PTransform takes care of making PPI from data array
# rot rotates the ppi 180 deg (image origin is upper left)
        # the translation moves the image to center the ppi
rot = MatrixTransform()
rot.rotate(180, (0, 0, 1))
self.transform = (STTransform(translate=(self.range+xoff, self.range+yoff, 0)) *
rot *
PTransform())
self.freeze()
class DXCanvas(SceneCanvas):
def __init__(self, **kwargs):
super(DXCanvas, self).__init__(keys='interactive', **kwargs)
self.size = 450, 450
self.unfreeze()
# add grid central widget
self.grid = self.central_widget.add_grid()
# add view to grid
self.view = self.grid.add_view(row=0, col=0)
self.view.border_color = (0.5, 0.5, 0.5, 1)
        # This is hardcoded for now, but may need to be handled when the data source changes
self.img_data = np.zeros((360, 128))
# initialize colormap, we take cubehelix for now
        # this is the nicest colormap for radar in vispy
cmap = 'cubehelix'
# this way we can hold several images on the same scene
# usable for radar mosaic
self.images = []
self.image = PolarImage(source=None,
data=self.img_data,
method='impostor',
# interpolation='bicubic',
cmap=cmap,
clim=(-32.5, 95),
parent=self.view.scene)
self.images.append(self.image)
# add signal emitters
self.mouse_moved = EventEmitter(source=self, type="mouse_moved")
self.key_pressed = EventEmitter(source=self, type="key_pressed")
# block double clicks
self.events.mouse_double_click.block()
# create PanZoomCamera
# the camera should zoom to the ppi "bounding box"
self.cam = PanZoomCamera(name="PanZoom",
rect=Rect(0, 0, 256, 256),
aspect=1,
parent=self.view.scene)
self.view.camera = self.cam
self._mouse_position = None
self.freeze()
self.measure_fps()
def on_mouse_move(self, event):
tr = self.scene.node_transform(self.image)
point = tr.map(event.pos)[:2]
# todo: we should actually move this into PTransform in the future
point[0] += np.pi
point[0] = np.rad2deg(point[0])
self._mouse_position = point
# emit signal
self.mouse_moved(event)
def on_key_press(self, event):
self.key_pressed(event)
def add_image(self, radar):
# this adds an image to the images list
image = PolarImage(source=radar,
data=self.img_data,
method='impostor',
# interpolation='bicubic',
cmap='cubehelix',
clim=(-32.5, 95),
parent=self.view.scene)
self.images.append(image)
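# Hedged usage sketch (assumption): add_image() expects a mapping that provides
# the radar site offsets under 'X' and 'Y' (see PolarImage above); the offsets
# and data below are hypothetical.
#
#     canvas = DXCanvas()
#     canvas.add_image({'X': 100, 'Y': 50})
#     canvas.images[-1].set_data(np.zeros((360, 128)))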
class RadolanWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(RadolanWidget, self).__init__(parent)
self.parent = parent
self.rcanvas = RadolanCanvas()
self.rcanvas.create_native()
self.rcanvas.native.setParent(self)
self.pcanvas = DXCanvas()
self.pcanvas.create_native()
self.pcanvas.native.setParent(self)
self.cbar = ColorbarCanvas()
self.cbar.create_native()
self.cbar.native.setParent(self)
self.canvas = self.rcanvas
# canvas swapper
self.swapper = {}
self.swapper['R'] = self.rcanvas.native
self.swapper['P'] = self.pcanvas.native
self.splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
self.splitter.addWidget(self.swapper['R'])
self.splitter.addWidget(self.swapper['P'])
self.swapper['P'].hide()
self.splitter.addWidget(self.cbar.native)
        # stretch factors for correct splitter behaviour
self.splitter.setStretchFactor(0, 1)
self.splitter.setStretchFactor(1, 1)
self.splitter.setStretchFactor(2, 0)
self.hbl = QtGui.QHBoxLayout()
self.hbl.addWidget(self.splitter)
self.setLayout(self.hbl)
def set_canvas(self, type):
if type == 'DX':
self.canvas = self.pcanvas
self.swapper['P'].show()
self.swapper['R'].hide()
else:
self.canvas = self.rcanvas
self.swapper['R'].show()
self.swapper['P'].hide()
def set_data(self, data):
        # for now this sets the same data on all images;
        # we would need to do the data loading via objects
        # (maybe the radar object from above) instead
for im in self.canvas.images:
im.set_data(data)
self.canvas.update()
def set_clim(self, clim):
self.canvas.image.clim = clim
self.cbar.cbar.clim = clim
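if __name__ == '__main__':
    # Minimal embedding sketch (assumption): show the widget inside a bare
    # QMainWindow; the real wradvis application may wire this up differently.
    import sys
    app = QtGui.QApplication(sys.argv)
    win = QtGui.QMainWindow()
    win.setCentralWidget(RadolanWidget())
    win.show()
    sys.exit(app.exec_())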
|
|
import datetime
import decimal
import json
import re
from django.core import serializers
from django.core.serializers.base import DeserializationError
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.test import SimpleTestCase, TestCase, TransactionTestCase
from django.test.utils import isolate_apps
from django.utils.translation import override, ugettext_lazy
from .models import Score
from .tests import SerializersTestBase, SerializersTransactionTestBase
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[
{
"pk": null,
"model": "serializers.category",
"fields": {"name": "Reference"}
}, {
"model": "serializers.category",
"fields": {"name": "Non-fiction"}
}]"""
mapping_ordering_str = """[
{
"model": "serializers.article",
"pk": %(article_pk)s,
"fields": {
"author": %(author_pk)s,
"headline": "Poker has no place on ESPN",
"pub_date": "2006-06-16T11:00:00",
"categories": [
%(first_category_pk)s,
%(second_category_pk)s
],
"meta_data": []
}
}
]
"""
@staticmethod
def _validate_output(serial_str):
try:
json.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
def test_indentation_whitespace(self):
s = serializers.json.Serializer()
json_data = s.serialize([Score(score=5.0), Score(score=6.0)], indent=2)
for line in json_data.splitlines():
if re.search(r'.+,\s*$', line):
self.assertEqual(line, line.rstrip())
@isolate_apps('serializers')
def test_custom_encoder(self):
class ScoreDecimal(models.Model):
score = models.DecimalField()
class CustomJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return str(o)
return super(CustomJSONEncoder, self).default(o)
s = serializers.json.Serializer()
json_data = s.serialize(
[ScoreDecimal(score=decimal.Decimal(1.0))], cls=CustomJSONEncoder
)
self.assertIn('"fields": {"score": "1"}', json_data)
def test_json_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("json", """[{"pk":1}"""):
pass
def test_helpful_error_message_invalid_pk(self):
"""
If there is an invalid primary key, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "badpk",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": 1,
"team": "Team"
}
}]"""
with self.assertRaisesMessage(DeserializationError, "(serializers.player:pk=badpk)"):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_invalid_field(self):
"""
If there is an invalid field value, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "1",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": "invalidint",
"team": "Team"
}
}]"""
expected = "(serializers.player:pk=1) field_value was 'invalidint'"
with self.assertRaisesMessage(DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_foreign_keys(self):
"""
        Invalid foreign keys with a natural key should throw a helpful error
        message indicating what the failing key is.
"""
test_string = """[{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Unknown foreign key",
"meta_data": [
"doesnotexist",
"metadata"
]
}
}]"""
key = ["doesnotexist", "metadata"]
expected = "(serializers.category:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_non_natural(self):
"""
Invalid many-to-many keys should throw a helpful error message.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"categories": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_natural1(self):
"""
Invalid many-to-many keys should throw a helpful error message.
This tests the code path where one of a list of natural keys is invalid.
"""
test_string = """[{
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [
["author", "meta1"],
["doesnotexist", "meta1"],
["author", "meta1"]
]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
key = ["doesnotexist", "meta1"]
expected = "(serializers.article:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(DeserializationError, expected):
for obj in serializers.deserialize('json', test_string):
obj.save()
def test_helpful_error_message_for_many2many_natural2(self):
"""
Invalid many-to-many keys should throw a helpful error message. This
tests the code path where a natural many-to-many key has only a single
value.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(DeserializationError, expected):
for obj in serializers.deserialize('json', test_string, ignore=False):
obj.save()
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
class DjangoJSONEncoderTests(SimpleTestCase):
def test_lazy_string_encoding(self):
self.assertEqual(
json.dumps({'lang': ugettext_lazy("French")}, cls=DjangoJSONEncoder),
'{"lang": "French"}'
)
with override('fr'):
self.assertEqual(
json.dumps({'lang': ugettext_lazy("French")}, cls=DjangoJSONEncoder),
'{"lang": "Fran\\u00e7ais"}'
)
def test_timedelta(self):
duration = datetime.timedelta(days=1, hours=2, seconds=3)
self.assertEqual(
json.dumps({'duration': duration}, cls=DjangoJSONEncoder),
'{"duration": "P1DT02H00M03S"}'
)
duration = datetime.timedelta(0)
self.assertEqual(
json.dumps({'duration': duration}, cls=DjangoJSONEncoder),
'{"duration": "P0DT00H00M00S"}'
)
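# Hedged illustration (assumption): besides timedeltas, DjangoJSONEncoder also
# serializes dates and datetimes to ISO 8601 strings, e.g.:
#
#     json.dumps({'when': datetime.date(2006, 6, 16)}, cls=DjangoJSONEncoder)
#     # -> '{"when": "2006-06-16"}'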
|
|
"""AuthZ Adapter implementations of cataloging managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import sessions
from ..osid import managers as osid_managers
from ..osid.osid_errors import Unimplemented, OperationFailed, Unsupported
from ..primitives import Id
from ..utilities import raise_null_argument
from dlkit.manager_impls.cataloging import managers as cataloging_managers
class CatalogingProfile(osid_managers.OsidProfile, cataloging_managers.CatalogingProfile):
"""Adapts underlying CatalogingProfile methodswith authorization checks."""
def __init__(self):
osid_managers.OsidProfile.__init__(self)
def _get_hierarchy_session(self, proxy=None):
if proxy is not None:
try:
return self._provider_manager.get_catalog_hierarchy_session(proxy)
except Unimplemented:
return None
try:
return self._provider_manager.get_catalog_hierarchy_session()
except Unimplemented:
return None
def supports_catalog_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_lookup()
def supports_catalog_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_query()
def supports_catalog_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_admin()
def supports_catalog_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_hierarchy()
def supports_catalog_hierarchy_design(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_catalog_hierarchy_design()
def get_catalog_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_catalog_record_types()
catalog_record_types = property(fget=get_catalog_record_types)
def get_catalog_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_catalog_search_record_types()
catalog_search_record_types = property(fget=get_catalog_search_record_types)
class CatalogingManager(osid_managers.OsidManager, CatalogingProfile, cataloging_managers.CatalogingManager):
"""Adapts underlying CatalogingManager methodswith authorization checks."""
def __init__(self):
CatalogingProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:catalogingProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_manager('CATALOGING', provider_impl)
# need to add version argument
def get_catalog_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogLookupSession')(
provider_session=self._provider_manager.get_catalog_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
catalog_lookup_session = property(fget=get_catalog_lookup_session)
def get_catalog_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogQuerySession')(
provider_session=self._provider_manager.get_catalog_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
catalog_query_session = property(fget=get_catalog_query_session)
def get_catalog_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogAdminSession')(
provider_session=self._provider_manager.get_catalog_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
catalog_admin_session = property(fget=get_catalog_admin_session)
def get_catalog_hierarchy_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogHierarchySession')(
provider_session=self._provider_manager.get_catalog_hierarchy_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
catalog_hierarchy_session = property(fget=get_catalog_hierarchy_session)
def get_catalog_hierarchy_design_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogHierarchyDesignSession')(
provider_session=self._provider_manager.get_catalog_hierarchy_design_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
catalog_hierarchy_design_session = property(fget=get_catalog_hierarchy_design_session)
def get_cataloging_rules_manager(self):
raise Unimplemented()
cataloging_rules_manager = property(fget=get_cataloging_rules_manager)
class CatalogingProxyManager(osid_managers.OsidProxyManager, CatalogingProfile, cataloging_managers.CatalogingProxyManager):
"""Adapts underlying CatalogingProxyManager methodswith authorization checks."""
def __init__(self):
CatalogingProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidProxyManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:catalogingProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_proxy_manager('CATALOGING', provider_impl)
# need to add version argument
@raise_null_argument
def get_catalog_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogLookupSession')(
provider_session=self._provider_manager.get_catalog_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_catalog_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogQuerySession')(
provider_session=self._provider_manager.get_catalog_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_catalog_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogAdminSession')(
provider_session=self._provider_manager.get_catalog_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_catalog_hierarchy_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogHierarchySession')(
provider_session=self._provider_manager.get_catalog_hierarchy_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_catalog_hierarchy_design_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'CatalogHierarchyDesignSession')(
provider_session=self._provider_manager.get_catalog_hierarchy_design_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
def get_cataloging_rules_proxy_manager(self):
raise Unimplemented()
cataloging_rules_proxy_manager = property(fget=get_cataloging_rules_proxy_manager)
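# Hedged usage sketch (assumption): callers are expected to gate on the profile
# methods before requesting a session from the adapted manager; how the manager
# itself is obtained from the runtime is deployment specific.
#
#     if manager.supports_catalog_lookup():
#         lookup_session = manager.get_catalog_lookup_session()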
|
|
# -*- test-case-name: twotp.test.test_node -*-
# Copyright (c) 2007-2009 Thomas Herve <[email protected]>.
# See LICENSE for details.
"""
Basic node protocol and node message handler classes.
"""
import os
import time
import random
import inspect
from hashlib import md5
from twisted.internet.protocol import Protocol
from twisted.internet.defer import (
succeed, Deferred, maybeDeferred, TimeoutError)
from twisted.python import log
from twotp.term import Tuple, Atom, Integer, Reference, Pid, List, Port
from twotp.packer import termToBinary, thePacker
from twotp.parser import ParserWithPidCache
class InvalidIdentifier(ValueError):
"""
Exception raised when the packet identifier received wasn't expected.
"""
class InvalidDigest(ValueError):
"""
Exception raised when the challenge digest received doesn't match.
"""
class BadRPC(ValueError):
"""
Exception raised when receiving a B{badrpc} answer to a callRemote.
"""
class MessageHandler(object):
"""
Proxy between erlang protocol and python methods.
"""
# Early operations
CTRLMSGOP_LINK = 1
CTRLMSGOP_SEND = 2
CTRLMSGOP_EXIT = 3
CTRLMSGOP_UNLINK = 4
CTRLMSGOP_NODE_LINK = 5
CTRLMSGOP_REG_SEND = 6
CTRLMSGOP_GROUP_LEADER = 7
CTRLMSGOP_EXIT2 = 8
    # New operations in distrvsn = 1 (OTP R4)
CTRLMSGOP_SEND_TT = 12
CTRLMSGOP_EXIT_TT = 13
CTRLMSGOP_REG_SEND_TT = 16
CTRLMSGOP_EXIT2_TT = 18
    # New operations in distrvsn = 4 (OTP R6)
CTRLMSGOP_MONITOR_P = 19
CTRLMSGOP_DEMONITOR_P = 20
CTRLMSGOP_MONITOR_P_EXIT = 21
DISTR_FLAG_PUBLISHED = 1
DISTR_FLAG_ATOMCACHE = 2
DISTR_FLAG_EXTENDEDREFERENCES = 4
DISTR_FLAG_DISTMONITOR = 8
DISTR_FLAG_FUNTAGS = 16
DISTR_FLAG_DISTMONITORNAME = 32
DISTR_FLAG_HIDDENATOMCACHE = 64
DISTR_FLAG_NEWFUNTAGS = 128
DISTR_FLAG_EXTENDEDPIDSPORTS = 256
distrVersion = 5
distrFlags = (DISTR_FLAG_EXTENDEDREFERENCES |
DISTR_FLAG_EXTENDEDPIDSPORTS | DISTR_FLAG_DISTMONITOR)
refIds = None
pidCount = 0
portCount = 0
serial = 0
nodeName = ""
cookie = ""
creation = 0
def __init__(self, nodeName, cookie):
"""
Instantiate the handler and its operation mapping.
"""
self.nodeName = nodeName
self.cookie = cookie
self.refIds = [1, 0, 0]
self._mapping = {}
for name, val in MessageHandler.__dict__.iteritems():
if name.startswith('CTRLMSGOP_'):
name = name.split('CTRLMSGOP_')[1].lower()
self._mapping[val] = getattr(self, 'operation_%s' % (name,))
self._parser = ParserWithPidCache()
self._namedProcesses = {}
self._registeredProcesses = {}
def operation_send(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_SEND}.
"""
destPid = controlMessage[1]
if destPid in self._registeredProcesses:
self._registeredProcesses[destPid](proto, controlMessage, message)
else:
log.msg("Send to unknown process %r" % (destPid,))
def operation_exit(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_EXIT}.
"""
destPid = controlMessage[0]
srcPid = controlMessage[1]
srcPid._signalExit(destPid, controlMessage[2])
def operation_link(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_LINK}.
"""
srcPid = controlMessage[0]
destPid = controlMessage[1]
destPid.link(proto, srcPid)
def operation_unlink(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_UNLINK}.
"""
srcPid = controlMessage[0]
destPid = controlMessage[1]
destPid.unlink(proto, srcPid)
def operation_node_link(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_NODE_LINK}.
"""
raise NotImplementedError()
def operation_group_leader(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_GROUP_LEADER}.
"""
raise NotImplementedError()
def operation_exit2(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_EXIT2}.
"""
return self.operation_exit(proto, controlMessage, message)
def operation_send_tt(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_SEND_TT}.
"""
return self.operation_send(proto, controlMessage, message)
def operation_exit_tt(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_EXIT_TT}.
"""
return self.operation_exit(proto, controlMessage, message)
def operation_reg_send_tt(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_REG_SEND_TT}.
"""
return self.operation_reg_send(proto, controlMessage, message)
def operation_exit2_tt(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_EXIT2_TT}.
"""
return self.operation_exit2(proto, controlMessage, message)
def operation_monitor_p(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_MONITOR_P}.
"""
srcPid = controlMessage[0]
destPid = controlMessage[1]
monitorRef = controlMessage[2]
destPid._remoteMonitor(proto, srcPid, monitorRef)
def operation_demonitor_p(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_DEMONITOR_P}.
"""
srcPid = controlMessage[0]
destPid = controlMessage[1]
monitorRef = controlMessage[2]
destPid._remoteDemonitor(proto, srcPid, monitorRef)
def operation_monitor_p_exit(self, proto, controlMessage, message):
"""
Manage C{CTRLMSGOP_MONITOR_P_EXIT}.
"""
srcPid = controlMessage[0]
destPid = controlMessage[1]
monitorRef = controlMessage[2]
destPid._signalMonitorExit(srcPid, monitorRef, controlMessage[3])
def operation_reg_send(self, proto, controlMessage, message):
"""
Handle C{REG_SEND} reply.
"""
# Unpacked, but unused for now
#fromPid = controlMessage[0]
#cookie = controlMessage[1]
toName = controlMessage[2]
if toName.text in self._namedProcesses:
self._namedProcesses[toName.text](proto, controlMessage, message)
else:
log.msg("Send to unknown process name %r" % (toName.text,))
def send(self, proto, destPid, msg):
"""
Common routine to reply to a request.
"""
cookie = Atom('')
ctrlMsg = Tuple((Integer(self.CTRLMSGOP_SEND), cookie, destPid))
proto.send("p" + termToBinary(ctrlMsg) + termToBinary(msg))
def namedSend(self, proto, pid, processName, msg):
"""
Send a message to a named process.
"""
cookie = Atom('')
ctrlMsg = Tuple(
(Integer(self.CTRLMSGOP_REG_SEND), pid, cookie, processName))
proto.send("p" + termToBinary(ctrlMsg) + termToBinary(msg))
def passThroughMessage(self, proto, controlMessage, message=None):
"""
Forward operation to the methods handling it.
"""
operation = controlMessage[0]
self._mapping[operation](proto, controlMessage[1:], message)
def sendLink(self, proto, srcPid, destPid):
"""
Create a link from local PID C{srcPid} to remote PID C{destPid}.
"""
ctrlMsg = Tuple((Integer(self.CTRLMSGOP_LINK), srcPid, destPid))
proto.send("p" + termToBinary(ctrlMsg))
def sendUnlink(self, proto, srcPid, destPid):
"""
Remove a previously created link between local PID C{srcPid} to remote
PID C{destPid}.
"""
ctrlMsg = Tuple((Integer(self.CTRLMSGOP_UNLINK), srcPid, destPid))
proto.send("p" + termToBinary(ctrlMsg))
def sendMonitor(self, proto, srcPid, destPid):
"""
Monitor remote PID C{destPid}.
@return: the L{Reference} of the monitoring, which will be passed in
exit.
"""
monitorRef = self.createRef()
ctrlMsg = Tuple(
(Integer(self.CTRLMSGOP_MONITOR_P), srcPid, destPid, monitorRef))
proto.send("p" + termToBinary(ctrlMsg))
return monitorRef
def sendDemonitor(self, proto, srcPid, destPid, monitorRef):
"""
Remove monitoring of remote process C{destPid}.
@return: the L{Reference} of the monitoring, which will be passed in
exit.
"""
ctrlMsg = Tuple(
(Integer(self.CTRLMSGOP_DEMONITOR_P), srcPid, destPid, monitorRef))
proto.send("p" + termToBinary(ctrlMsg))
def sendLinkExit(self, proto, srcPid, destPid, reason):
"""
Send an exit signal for a remote linked process.
"""
ctrlMsg = Tuple(
(Integer(self.CTRLMSGOP_EXIT), srcPid, destPid, reason))
proto.send("p" + termToBinary(ctrlMsg))
def sendMonitorExit(self, proto, srcPid, destPid, monitorRef, reason):
"""
Send a monitor exit signal for a remote process.
"""
ctrlMsg = Tuple(
(Integer(self.CTRLMSGOP_MONITOR_P_EXIT), srcPid, destPid,
monitorRef, reason))
proto.send("p" + termToBinary(ctrlMsg))
def createRef(self):
"""
        Create a unique Erlang reference.
"""
r = Reference(Atom(self.nodeName), self.refIds, self.creation)
self.refIds[0] += 1
if self.refIds[0] > 0x3ffff:
self.refIds[0] = 0
self.refIds[1] += 1
if isinstance(self.refIds[1], long):
self.refIds[1] = 0
self.refIds[2] += 1
return r
def createPid(self):
"""
        Create a unique Pid object.
"""
p = Pid(Atom(self.nodeName), self.pidCount, self.serial, self.creation)
self.pidCount += 1
if self.pidCount > 0x7fff:
self.pidCount = 0
self.serial += 1
if self.distrFlags & self.DISTR_FLAG_EXTENDEDPIDSPORTS:
if self.serial > 0x1fff:
self.serial = 0
elif self.serial > 0x07:
self.serial = 0
self._parser._pids[p] = p
return p
def createPort(self):
"""
        Create a unique Port object.
"""
o = Port(Atom(self.nodeName), self.portCount, self.creation)
self.portCount += 1
if self.distrFlags & self.DISTR_FLAG_EXTENDEDPIDSPORTS:
if self.portCount > 0xfffffff:
self.portCount = 0
elif self.portCount > 0x3ffff:
self.portCount = 0
return o
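# Hedged illustration (assumption): __init__ above builds _mapping by
# lower-casing the CTRLMSGOP_* names, so each control code dispatches to the
# matching operation_* method, e.g.:
#
#     handler = MessageHandler("py@localhost", "COOKIE")
#     assert handler._mapping[MessageHandler.CTRLMSGOP_SEND] == handler.operation_send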
class NodeProtocol(Protocol):
"""
@ivar state: 'handshake', 'challenge', 'connected'.
@type state: C{str}
"""
def __init__(self):
"""
Initialize protocol attributes.
"""
self.received = ""
self.state = "handshake"
self._responseTimerID = None
self._tickTimerID = None
self._lastResponseTime = 0
self._lastTickTime = 0
def callLater(self, interval, f):
"""
Wrapper around C{reactor.callLater} for test purpose.
"""
from twisted.internet import reactor
return reactor.callLater(interval, f)
def startTimer(self):
"""
Start timers checking connection activity.
"""
self._responseTimerID = self.callLater(
self.factory.netTickTime * 0.25, self._responseTimer)
self._tickTimerID = self.callLater(
self.factory.netTickTime * 0.125, self._tickTimer)
def updateResponseTimer(self):
"""
Update last time a response was received.
"""
self._lastResponseTime = self.factory.timeFactory()
def updateTickTimer(self):
"""
Update last time a request was sent.
"""
self._lastTickTime = self.factory.timeFactory()
def _responseTimer(self):
"""
Check for last response.
"""
now = self.factory.timeFactory()
if now > self._lastResponseTime + self.factory.netTickTime * 1.25:
log.msg("No response for a long time, disconnecting.")
self._responseTimerID = None
self.transport.loseConnection()
else:
self._responseTimerID = self.callLater(
self.factory.netTickTime * 0.25, self._responseTimer)
def _tickTimer(self):
"""
Check for last request, sending a fake request if necessary.
"""
now = self.factory.timeFactory()
if now > self._lastTickTime + self.factory.netTickTime * 0.25:
self._tickTimerID = None
self.send("")
self._tickTimerID = self.callLater(
self.factory.netTickTime * 0.125, self._tickTimer)
def send_handshake(self, data):
"""
Send during the handshake state.
"""
msg = thePacker.packShort(len(data)) + data
self.transport.write(msg)
send_challenge = send_handshake
def send_connected(self, data):
"""
Send during the connected state.
"""
msg = thePacker.packInt(len(data)) + data
self.transport.write(msg)
def send(self, data):
"""
Send data: update last sent time and forward current send method.
"""
self.updateTickTimer()
return getattr(self, "send_%s" % (self.state,))(data)
def generateChallenge(self):
"""
Generate a simple insecure challenge.
"""
return self.factory.randomFactory(0, 0x7fffffff)
def generateDigest(self, challenge, cookie):
"""
Create a MD5 digest of a challenge and a cookie.
"""
challengeStr = str(challenge)
return md5(cookie + challengeStr).digest()
def dataReceived(self, data):
"""
Dispatch data between handlers.
"""
self.received += data
while self.received:
remainData = getattr(
self, "handle_%s" % (self.state,))(self.received)
if len(remainData) == len(self.received):
# Data remains unchanged, so there's nothing more to see here
break
self.received = remainData
self.updateResponseTimer()
def handle_connected(self, data):
"""
Handle data in the connected state.
"""
if len(data) < 4:
return data
packetLen = self.factory.handler._parser.parseInt(data[0:4])
if len(data) < packetLen + 4:
# Incomplete packet.
return data
packetData = data[4:packetLen + 4]
if packetLen == 0:
# Tick
pass
elif packetData[0] == "p":
terms = list(
self.factory.handler._parser.binaryToTerms(packetData[1:]))
if len(terms) == 2:
self.factory.handler.passThroughMessage(
self, terms[0], terms[1])
elif len(terms) == 1:
self.factory.handler.passThroughMessage(self, terms[0])
else:
log.msg("Unhandled terms")
else:
log.msg("Unhandled packed type: %r" % (packetData[0],))
return data[packetLen + 4:]
def connectionLost(self, reason):
"""
Manage connection lost with a node.
"""
log.msg("Connection closed: %s" % (reason,))
if self._responseTimerID is not None:
self._responseTimerID.cancel()
self._responseTimerID = None
if self._tickTimerID is not None:
self._tickTimerID.cancel()
self._tickTimerID = None
class NodeBaseFactory(object):
"""
Mixin factory for client and server node connections.
@ivar creation: node serial number.
@type creation: C{int}
@ivar netTickTime: reference time used for checking node connectivity.
@type netTickTime: C{int}
@ivar timeFactory: factory giving current time, to be customized in tests.
@type timeFactory: C{callable}
@ivar randomFactory: factory giving a random number, to be customized in
the tests.
@type randomFactory: C{callable}
"""
creation = 0
netTickTime = 60
timeFactory = time.time
randomFactory = random.randint
def __init__(self, nodeName, cookie):
"""
Initialize the server factory.
@param nodeName: the name of the node.
@type nodeName: C{str}
        @param cookie: cookie used for authorization between nodes.
        @type cookie: C{str}
"""
self.handler = MessageHandler(nodeName, cookie)
class ProcessExited(Exception):
"""
Exception raised when trying to use an exited process.
"""
class ProcessBase(object):
"""
    Represent an Erlang-like process in your Twisted application, able to
communicate with Erlang nodes.
"""
persistentEpmd = None
serverFactory = None
pid = None
_receiveDeferred = None
_cancelReceiveID = None
def __init__(self, nodeName, cookie, handler=None):
self.nodeName = nodeName
self.cookie = cookie
self.oneShotEpmds = {}
self._pendingReceivedData = []
if handler is None:
handler = MessageHandler(nodeName, cookie)
self.handler = handler
self.pid = self.handler.createPid()
self.handler._registeredProcesses[self.pid] = self._receivedData
def callLater(self, interval, f):
"""
Wrapper around C{reactor.callLater} for test purpose.
"""
from twisted.internet import reactor
return reactor.callLater(interval, f)
def oneShotPortMapperClass(self):
"""
Property serving L{OneShotPortMapperFactory}, to be customized in
tests.
"""
from twotp.epmd import OneShotPortMapperFactory
return OneShotPortMapperFactory
oneShotPortMapperClass = property(oneShotPortMapperClass)
def persistentPortMapperClass(self):
"""
Property serving L{PersistentPortMapperFactory}, to be customized in
tests.
"""
from twotp.epmd import PersistentPortMapperFactory
return PersistentPortMapperFactory
persistentPortMapperClass = property(persistentPortMapperClass)
def listen(self):
"""
Start a listening process able to receive calls from other Erlang
nodes.
"""
if self.persistentEpmd is not None:
raise RuntimeError("Already listening")
self.persistentEpmd = self.persistentPortMapperClass(
self.nodeName, self.cookie)
def gotFactory(factory):
self.serverFactory = factory
factory.handler = self.handler
for call in self.handler._registeredProcesses.values():
call.im_self.serverFactory = factory
return self.persistentEpmd.publish().addCallback(gotFactory)
def stop(self):
"""
Stop listening.
"""
return self.persistentEpmd.stop()
def _getNodeConnection(self, nodeName):
"""
Retrieve a connection to node C{nodeName}.
"""
nodeName = buildNodeName(nodeName)
if (self.serverFactory is not None and
nodeName in self.serverFactory._nodeCache):
return succeed(self.serverFactory._nodeCache[nodeName])
def sync(instance):
instance.factory.handler = self.handler
return instance
host = nodeName.split("@", 1)[1]
        if host not in self.oneShotEpmds:
self.oneShotEpmds[host] = self.oneShotPortMapperClass(
self.nodeName, self.cookie, host)
oneShotEpmd = self.oneShotEpmds[host]
return oneShotEpmd.connectToNode(nodeName).addCallback(sync)
def names(self, host):
"""
        Return the list of registered nodes on the given C{host}.
"""
        if host not in self.oneShotEpmds:
self.oneShotEpmds[host] = self.oneShotPortMapperClass(
self.nodeName, self.cookie, host)
oneShotEpmd = self.oneShotEpmds[host]
return oneShotEpmd.names()
def register(self, name):
"""
Register this process with name C{name}.
"""
self.handler._namedProcesses[name] = self._receivedData
def ping(self, nodeName):
"""
Ping node C{nodeName}.
"""
def doPing(instance):
d = Deferred()
process = NetKernelResponseProcess(
self.nodeName, self.cookie, self.handler, d)
pid = process.pid
ref = self.handler.createRef()
msg = Tuple((Atom("$gen_call"), Tuple((pid, ref)),
Tuple((Atom("is_auth"), Atom(self.nodeName)))))
self.handler.namedSend(instance, pid, Atom("net_kernel"), msg)
return d
return self._getNodeConnection(nodeName).addCallback(doPing)
def callRemote(self, nodeName, module, func, *args):
"""
RPC call against node C{nodeName}.
"""
def doCallRemote(instance):
d = Deferred()
process = RexResponseProcess(
self.nodeName, self.cookie, self.handler, d)
pid = process.pid
call = Tuple((Atom("call"), Atom(module), Atom(func), List(args),
Atom("user")))
rpc = Tuple((pid, call))
self.handler.namedSend(instance, pid, Atom("rex"), rpc)
return d
return self._getNodeConnection(nodeName).addCallback(doCallRemote)
def whereis(self, nodeName, process):
"""
Return the pid of a process on a node.
"""
def check(result):
if isinstance(result, Atom) and result.text == "undefined":
raise ValueError("Process not found")
elif isinstance(result, Pid):
return result
else:
raise ValueError("Unexpected result", result)
return self.callRemote(
nodeName, "erlang", "whereis", Atom(process)).addCallback(check)
def link(self, pid):
"""
Create a link with process C{pid}.
"""
def doLink(instance):
self.pid.link(instance, pid)
self.handler.sendLink(instance, self.pid, pid)
return self._getNodeConnection(pid.nodeName.text).addCallback(doLink)
def unlink(self, pid):
"""
Remove a link with process C{pid}.
"""
def doUnlink(instance):
self.pid.unlink(instance, pid)
self.handler.sendUnlink(instance, self.pid, pid)
return self._getNodeConnection(pid.nodeName.text).addCallback(doUnlink)
def monitor(self, pid):
"""
Monitor process C{pid}.
@return: a L{Reference} of the monitoring link.
"""
def doMonitor(instance):
return self.handler.sendMonitor(instance, self.pid, pid)
d = self._getNodeConnection(pid.nodeName.text)
return d.addCallback(doMonitor)
def demonitor(self, pid, ref):
"""
Demonitor process C{pid}.
"""
def doDemonitor(instance):
            return self.handler.sendDemonitor(instance, self.pid, pid, ref)
d = self._getNodeConnection(pid.nodeName.text)
return d.addCallback(doDemonitor)
def send(self, pid, msg):
"""
        Directly send data to process C{pid}.
"""
def doSend(instance):
return self.handler.send(instance, pid, msg)
return self._getNodeConnection(pid.nodeName.text).addCallback(doSend)
def namedSend(self, nodeName, processName, msg):
"""
Send data to process named C{processName} on node C{nodeName}.
"""
def doSend(instance):
return self.handler.namedSend(
instance, self.pid, Atom(processName), msg)
return self._getNodeConnection(nodeName).addCallback(doSend)
def _receivedData(self, proto, ctrlMessage, message):
"""
        Callback called when a message has been sent to this pid.
"""
if self._receiveDeferred is not None:
if self._cancelReceiveID is not None:
self._cancelReceiveID.cancel()
self._cancelReceiveID = None
d, self._receiveDeferred = self._receiveDeferred, None
d.callback(message)
else:
self._pendingReceivedData.append(message)
def _cancelReceive(self):
"""
Cancel a receive with a specified timeout.
@see: C{receive}
"""
self._cancelReceiveID = None
d, self._receiveDeferred = self._receiveDeferred, None
d.errback(TimeoutError())
def receive(self, timeout=None):
"""
Wait for received data on this process, possibly specifying a timeout.
@param timeout: timeout in seconds to wait for data.
@type timeout: C{int} or C{NoneType}
@return: a L{Deferred} that will fire received data.
"""
if self._pendingReceivedData:
return succeed(self._pendingReceivedData.pop(0))
elif self._receiveDeferred is None:
self._receiveDeferred = Deferred()
if timeout is not None:
self._cancelReceiveID = self.callLater(
timeout, self._cancelReceive)
return self._receiveDeferred
else:
raise RuntimeError("Pending receive")
def exit(self, reason):
"""
Exit this process.
"""
for proto, pid in self.pid._links:
self.handler.sendLinkExit(proto, self.pid, pid, reason)
for proto, pid, ref in self.pid._remoteMonitors:
self.handler.sendMonitorExit(proto, self.pid, pid, ref, reason)
self.pid.exit(reason)
if self._receiveDeferred is not None:
d, self._receiveDeferred = self._receiveDeferred, None
d.errback(ProcessExited(reason))
def addExitHandler(self, pid, handler):
"""
Register a callback to be called when C{pid} exits.
"""
self.pid._handlers.setdefault(pid, []).append(handler)
def addMonitorHandler(self, ref, handler):
"""
Register a callback to be called when monitoring L{Reference} C{ref}
fires.
"""
self.pid._monitorHandlers.setdefault(ref, []).append(handler)
class Process(ProcessBase):
"""
The master process, managing C{rex} and C{net_kernel} process.
"""
def __init__(self, nodeName, cookie):
ProcessBase.__init__(self, nodeName, cookie)
self._methodsHolder = {}
rex = RexProcess(nodeName, cookie, self.handler, self._methodsHolder)
rex.register("rex")
netKernel = NetKernelProcess(nodeName, cookie, self.handler,
self._methodsHolder)
netKernel.register("net_kernel")
def registerModule(self, name, instance):
"""
Register a method holder for module named C{name}.
"""
self._methodsHolder[name] = instance
class SpawnProcess(ProcessBase):
"""
Process class to subclass when implementing a remote process supporting
'spawn'.
"""
def start(self, pid, args):
"""
Method to implement to be notified when the process is started.
"""
class RexProcess(ProcessBase):
"""
The C{rex} process: specific process able to receive RPC calls.
"""
def __init__(self, nodeName, cookie, handler, methodsHolder):
ProcessBase.__init__(self, nodeName, cookie, handler)
self._methodsHolder = methodsHolder
def _receivedData(self, proto, ctrlMessage, message):
"""
Parse messages and forward data to the appropriate method, if any.
"""
toPid = message[1][0]
ref = message[1][1]
module = message[2][1].text
func = message[2][2].text
args = message[2][3]
if module in self._methodsHolder:
proxy = getattr(self._methodsHolder[module],
"remote_%s" % (func,), None)
if proxy is None:
log.msg("Unknow method %r of module %r" % (func, module))
self.handler.send(proto, toPid, Tuple((ref, (Atom('badrpc'),
"undefined function %r" % (func,)))))
else:
log.msg("Remote call to method %r" % (func,))
d = maybeDeferred(proxy, *args)
d.addCallback(self._forwardResponse, proto, toPid, ref)
d.addErrback(self._forwardError, proto, toPid, ref)
else:
log.msg("No holder registered for %r" % (module,))
self.handler.send(proto, toPid, Tuple((ref, (Atom('badrpc'),
"undefined module %r" % (module,)))))
def _forwardResponse(self, result, proto, toPid, ref):
"""
Forward a response to an erlang node from a python method.
"""
if result is None:
result = Atom("null")
else:
result = Tuple((Atom("ok"), result))
self.handler.send(proto, toPid, Tuple((ref, result)))
def _forwardError(self, error, proto, toPid, ref):
"""
Forward the string representation of the exception to the node.
"""
log.err(error)
self.handler.send(
proto, toPid, Tuple((ref, (Atom('badrpc'), str(error.value)))))
class RexResponseProcess(ProcessBase):
"""
A volatile process used to manage one response to a callRemote.
"""
def __init__(self, nodeName, cookie, handler, deferred):
ProcessBase.__init__(self, nodeName, cookie, handler)
self.deferred = deferred
def _receivedData(self, proto, ctrlMessage, message):
"""
Parse the message and fire the deferred with appropriate content.
"""
d = self.deferred
if (isinstance(message[1], (list, tuple)) and len(message[1]) > 0
and message[1][0] == Atom("badrpc")):
d.errback(BadRPC(message[1][1]))
else:
d.callback(message[1])
class NetKernelProcess(ProcessBase):
"""
A process managing net_kernel calls: it only implements ping responses for
now.
"""
def __init__(self, nodeName, cookie, handler, methodsHolder):
ProcessBase.__init__(self, nodeName, cookie, handler)
self._methodsHolder = methodsHolder
def _receivedData(self, proto, ctrlMessage, message):
"""
Handle regsend reply for net_kernel module.
"""
if message[0].text == "$gen_call":
toPid = message[1][0]
ref = message[1][1]
method = message[2][0].text
if method == "is_auth":
# Reply to ping
resp = Tuple((ref, Atom("yes")))
elif method in ("spawn", "spawn_link"):
moduleName, funcName, args = message[2][1:4]
module = moduleName.text
func = funcName.text
if module in self._methodsHolder:
processClass = getattr(
self._methodsHolder[module], func, None)
if processClass is None:
log.msg(
"Unknow method %r of module %r" % (func, module))
resp = Tuple(
(ref, Atom("undefined function %r" % (func,))))
elif not (inspect.isclass(processClass) and
issubclass(processClass, SpawnProcess)):
log.msg("Trying to spawn non process %r of module %r" %
(func, module))
resp = Tuple(
(ref, Atom("wrong process %r" % (func,))))
else:
log.msg("Spawn call to method %r" % (func,))
process = processClass(self.nodeName, self.cookie,
self.handler)
process.serverFactory = self.serverFactory
if method == "spawn_link":
process.pid.link(proto, toPid)
self.handler.sendLink(proto, process.pid, toPid)
process.start(toPid, args)
resp = Tuple((ref, process.pid))
else:
log.msg("No holder registered for %r" % (module,))
resp = Tuple(
(ref, Atom("undefined module %r" % (module,))))
else:
log.msg("Unhandled method %s" % (message[2][0].text,))
resp = Tuple((ref, Atom("error")))
self.handler.send(proto, toPid, resp)
else:
log.msg("Unhandled call %s" % (message[0].text,))
class NetKernelResponseProcess(ProcessBase):
"""
A process managing net_kernel responses: it only implements ping for now.
"""
def __init__(self, nodeName, cookie, handler, deferred):
ProcessBase.__init__(self, nodeName, cookie, handler)
self.deferred = deferred
def _receivedData(self, proto, ctrlMessage, message):
"""
Handle ping reply.
"""
d = self.deferred
if message[1].text == "yes":
d.callback("pong")
else:
d.callback("pang")
def buildNodeName(nodeName):
"""
Check if nodeName is a valid nodeName, or append the current hostname
to it.
"""
if "@" in nodeName:
return nodeName
else:
return nodeName + "@" + getHostName()
def getHostName():
"""
Return the current hostname to be used in the node name.
"""
import socket
fqdn = socket.getfqdn()
if "localhost" in fqdn:
fqdn = socket.getaddrinfo(socket.gethostname(), None, socket.AF_INET,
socket.SOCK_DGRAM, socket.IPPROTO_IP,
socket.AI_CANONNAME)[0][3]
if "localhost" in fqdn:
fqdn = socket.gethostname()
return fqdn.split(".")[0]
def readCookie():
"""
Read cookie on disk.
"""
return file(os.path.expanduser('~/.erlang.cookie')).readlines()[0].strip()
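# Hedged usage sketch (assumption): a typical client built on this module; the
# remote node name and called function below are hypothetical.
#
#     from twisted.internet import reactor
#
#     cookie = readCookie()
#     process = Process(buildNodeName("pythonnode"), cookie)
#
#     def printResult(result):
#         print result
#
#     process.callRemote("erlangnode@host", "erlang", "node").addCallback(printResult)
#     reactor.run()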
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Wrapper around PyParsing that selects the best available implementation.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import os
import sys
import traceback
import functools
import inspect
from warnings import warn
from collections import defaultdict
from coconut.constants import (
PURE_PYTHON,
PYPY,
use_fast_pyparsing_reprs,
use_packrat_parser,
packrat_cache_size,
default_whitespace_chars,
varchars,
min_versions,
pure_python_env_var,
enable_pyparsing_warnings,
use_left_recursion_if_available,
)
from coconut.util import get_clock_time # NOQA
from coconut.util import (
ver_str_to_tuple,
ver_tuple_to_str,
get_next_version,
)
# warning: do not name this file cPyparsing or pyparsing or it might collide with the following imports
try:
if PURE_PYTHON:
raise ImportError("skipping cPyparsing check due to " + pure_python_env_var + " = " + os.environ.get(pure_python_env_var, ""))
import cPyparsing as _pyparsing
from cPyparsing import * # NOQA
from cPyparsing import __version__
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = "Cython cPyparsing v" + __version__
except ImportError:
try:
import pyparsing as _pyparsing
from pyparsing import * # NOQA
from pyparsing import __version__
PYPARSING_PACKAGE = "pyparsing"
PYPARSING_INFO = "Python pyparsing v" + __version__
except ImportError:
traceback.print_exc()
__version__ = None
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = None
# -----------------------------------------------------------------------------------------------------------------------
# VERSION CHECKING:
# -----------------------------------------------------------------------------------------------------------------------
min_ver = min(min_versions["pyparsing"], min_versions["cPyparsing"][:3]) # inclusive
max_ver = get_next_version(max(min_versions["pyparsing"], min_versions["cPyparsing"][:3])) # exclusive
cur_ver = None if __version__ is None else ver_str_to_tuple(__version__)
if cur_ver is None or cur_ver < min_ver:
min_ver_str = ver_tuple_to_str(min_ver)
raise ImportError(
"Coconut requires pyparsing/cPyparsing version >= " + min_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install --upgrade {package}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE),
)
elif cur_ver >= max_ver:
max_ver_str = ver_tuple_to_str(max_ver)
warn(
"This version of Coconut was built for pyparsing/cPyparsing versions < " + max_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install {package}<{max_ver}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE, max_ver=max_ver_str),
)
# -----------------------------------------------------------------------------------------------------------------------
# SETUP:
# -----------------------------------------------------------------------------------------------------------------------
if cur_ver >= (3,):
MODERN_PYPARSING = True
_trim_arity = _pyparsing.core._trim_arity
_ParseResultsWithOffset = _pyparsing.core._ParseResultsWithOffset
else:
MODERN_PYPARSING = False
_trim_arity = _pyparsing._trim_arity
_ParseResultsWithOffset = _pyparsing._ParseResultsWithOffset
USE_COMPUTATION_GRAPH = (
not MODERN_PYPARSING # not yet supported
and not PYPY # experimentally determined
)
if enable_pyparsing_warnings:
if MODERN_PYPARSING:
_pyparsing.enable_all_warnings()
else:
_pyparsing._enable_all_warnings()
_pyparsing.__diag__.warn_name_set_on_empty_Forward = False
if MODERN_PYPARSING and use_left_recursion_if_available:
ParserElement.enable_left_recursion()
elif use_packrat_parser:
ParserElement.enablePackrat(packrat_cache_size)
ParserElement.setDefaultWhitespaceChars(default_whitespace_chars)
Keyword.setDefaultKeywordChars(varchars)
# -----------------------------------------------------------------------------------------------------------------------
# FAST REPRS:
# -----------------------------------------------------------------------------------------------------------------------
if PY2:
def fast_repr(cls):
"""A very simple, fast __repr__/__str__ implementation."""
return "<" + cls.__name__ + ">"
else:
fast_repr = object.__repr__
_old_pyparsing_reprs = []
def set_fast_pyparsing_reprs():
"""Make pyparsing much faster by preventing it from computing expensive nested string representations."""
for obj in vars(_pyparsing).values():
try:
if issubclass(obj, ParserElement):
_old_pyparsing_reprs.append((obj, (obj.__repr__, obj.__str__)))
obj.__repr__ = functools.partial(fast_repr, obj)
obj.__str__ = functools.partial(fast_repr, obj)
except TypeError:
pass
def unset_fast_pyparsing_reprs():
"""Restore pyparsing's default string representations for ease of debugging."""
for obj, (repr_method, str_method) in _old_pyparsing_reprs:
obj.__repr__ = repr_method
obj.__str__ = str_method
if use_fast_pyparsing_reprs:
set_fast_pyparsing_reprs()
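# Usage sketch (illustrative addition, not part of the original module): the fast
# reprs are installed at import time when `use_fast_pyparsing_reprs` is set, but they
# can be toggled around a debugging session if readable grammar reprs are needed:
#
#     unset_fast_pyparsing_reprs()   # restore pyparsing's verbose __repr__/__str__
#     ...                            # inspect grammar elements while debugging
#     set_fast_pyparsing_reprs()     # switch back to the cheap "<ClassName>" reprs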
# -----------------------------------------------------------------------------------------------------------------------
# PROFILING:
# -----------------------------------------------------------------------------------------------------------------------
_timing_info = [None] # in list to allow reassignment
class _timing_sentinel(object):
pass
def add_timing_to_method(cls, method_name, method):
"""Add timing collection to the given method.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import internal_assert # hide to avoid circular import
args, varargs, keywords, defaults = inspect.getargspec(method)
internal_assert(args[:1] == ["self"], "cannot add timing to method", method_name)
if not defaults:
defaults = []
num_undefaulted_args = len(args) - len(defaults)
def_args = []
call_args = []
fix_arg_defaults = []
defaults_dict = {}
for i, arg in enumerate(args):
if i >= num_undefaulted_args:
default = defaults[i - num_undefaulted_args]
def_args.append(arg + "=_timing_sentinel")
defaults_dict[arg] = default
fix_arg_defaults.append(
"""
if {arg} is _timing_sentinel:
{arg} = _exec_dict["defaults_dict"]["{arg}"]
""".strip("\n").format(
arg=arg,
),
)
else:
def_args.append(arg)
call_args.append(arg)
if varargs:
def_args.append("*" + varargs)
call_args.append("*" + varargs)
if keywords:
def_args.append("**" + keywords)
call_args.append("**" + keywords)
new_method_name = "new_" + method_name + "_func"
_exec_dict = globals().copy()
_exec_dict.update(locals())
new_method_code = """
def {new_method_name}({def_args}):
{fix_arg_defaults}
_all_args = (lambda *args, **kwargs: args + tuple(kwargs.values()))({call_args})
_exec_dict["internal_assert"](not any(_arg is _timing_sentinel for _arg in _all_args), "error handling arguments in timed method {new_method_name}({def_args}); got", _all_args)
_start_time = _exec_dict["get_clock_time"]()
try:
return _exec_dict["method"]({call_args})
finally:
_timing_info[0][str(self)] += _exec_dict["get_clock_time"]() - _start_time
{new_method_name}._timed = True
""".format(
fix_arg_defaults="\n".join(fix_arg_defaults),
new_method_name=new_method_name,
def_args=", ".join(def_args),
call_args=", ".join(call_args),
)
exec(new_method_code, _exec_dict)
setattr(cls, method_name, _exec_dict[new_method_name])
return True
def collect_timing_info():
"""Modifies pyparsing elements to time how long they're executed for.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import logger # hide to avoid circular imports
logger.log("adding timing to pyparsing elements:")
_timing_info[0] = defaultdict(float)
for obj in vars(_pyparsing).values():
if isinstance(obj, type) and issubclass(obj, ParserElement):
added_timing = False
for attr_name in dir(obj):
attr = getattr(obj, attr_name)
if (
callable(attr)
and not isinstance(attr, ParserElement)
and not getattr(attr, "_timed", False)
and attr_name not in (
"__getattribute__",
"__setattribute__",
"__init_subclass__",
"__subclasshook__",
"__class__",
"__setattr__",
"__getattr__",
"__new__",
"__init__",
"__str__",
"__repr__",
"__hash__",
"__eq__",
"_trim_traceback",
"_ErrorStop",
"enablePackrat",
"inlineLiteralsUsing",
"setDefaultWhitespaceChars",
"setDefaultKeywordChars",
"resetCache",
)
):
added_timing |= add_timing_to_method(obj, attr_name, attr)
if added_timing:
logger.log("\tadded timing to", obj)
def print_timing_info():
"""Print timing_info collected by collect_timing_info()."""
print(
"""
=====================================
Timing info:
(timed {num} total pyparsing objects)
=====================================
""".rstrip().format(
num=len(_timing_info[0]),
),
)
sorted_timing_info = sorted(_timing_info[0].items(), key=lambda kv: kv[1])
for method_name, total_time in sorted_timing_info:
print("{method_name}:\t{total_time}".format(method_name=method_name, total_time=total_time))
|
|
#!/usr/bin/python2
# ----------------------------------------------------------------------
# Copyright (2010) Aram Davtyan and Garegin Papoian
# Papoian's Group, University of Maryland at College Park
# http://papoian.chem.umd.edu/
# Last Update: 03/04/2011
# ----------------------------------------------------------------------
import os
import sys
class Atom:
def __init__(self, no, ch, res, ty, q, x, y, z):
self.no = no
self.ch = ch
self.res = res
self.ty = ty
self.q = q
self.x = x
self.y = y
self.z = z
def write_(self, f):
space11 = " "
f.write( (space11+str(self.no))[-12:] + "\t" )
f.write( "\t".join([ str(self.ch), str(self.res), str(self.ty), str(self.q), str(self.x), str(self.y), str(self.z) ]) )
f.write( "\n" )
class Bond:
def __init__(self, no, ty, I, J):
self.no = no
self.ty = ty
self.I = I
self.J = J
def write_(self, f):
f.write( (space11+str(self.no))[-12:] + "\t" )
f.write( "\t".join([ str(self.ty), str(self.I), str(self.J) ]) )
f.write( "\n" )
inp_file = ""
out_file = ""
if len(sys.argv)>1: inp_file = sys.argv[1]
if len(sys.argv)>2: out_file = sys.argv[2]
if inp_file=="":
print "\nCoordinatesToLammpsDataFile.py input_file [output_file] [-b] [-go]\n\n"
print "\t-b\tadd bonds between CA & CA, CA & O and CA & CB in the case of coarse graining\n"
print "\t-go\tcoarse-grained setup\n\n"
exit()
cg_bonds = False
go = False
for cl in sys.argv[3:]:
if cl == '-b': cg_bonds = True
if cl == '-go': go = True
seq_file = "sequance.seq"
lammps_out_file = "file.in"
if out_file[:5]=="data.":
lammps_out_file = out_file[5:] + ".in"
seq_file = out_file[5:] + ".seq"
elif out_file[-5:]==".data":
lammps_out_file = out_file[:-5] + ".in"
seq_file = out_file[:-5] + ".seq"
else:
lammps_out_file = out_file + ".in"
out_file = "data." + out_file
seq_file = out_file + ".seq"
cg = True
xlo = -1000.0
xhi = 1000.0
ylo = -1000.0
yhi = 1000.0
zlo = -1000.0
zhi = 1000.0
masses = [12.0, 14.0, 16.0, 12.0, 1.0]
if cg and not go:
masses = [27.0, 14.0, 28.0, 60.0, 2.0]
n_atom_types = 5
if cg:
if cg_bonds: n_bond_types = 5
else: n_bond_types = 0
else: n_bond_types = 7
last_nos = { 'N' : 0, 'C-Alpha' : 0, 'C-Prime' : 0, 'O' : 0 }
last_chno = { 'N' : 0, 'C-Alpha' : 0, 'C-Prime' : 0, 'O' : 0 }
n_atoms = 0
n_bonds = 0
n_res = 0
group_id = 0
atoms = []
bonds = []
groups = []
fix_string = "4 alpha_carbons backbone beta_atoms oxygens fix_backbone_coeff.data " + seq_file
if go:
fix_string = "2 alpha_carbons gomodel fix_gomodel_coeff.data"
groups.append(["alpha_carbons", "id"])
if not go:
groups.append(["beta_atoms", "id"])
groups.append(["oxygens", "id"])
inp = open(inp_file)
atom_type = 0
for l in inp:
l = l.strip().split()
if len(l)==6:
print "Input file lacks description field!"
exit()
desc = l[6]
chain_no = l[1]
if not go:
if desc == 'C-Beta' or desc == 'H-Beta' or desc == 'C-Alpha' or desc == 'O':
n_atoms += 1
else:
if desc == 'C-Alpha':
n_atoms += 1
if not go:
if desc == 'N0' or desc == 'N':
atom_type = 2
if last_nos['C-Prime']!=0 and last_chno['C-Prime']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 3, last_nos['C-Prime'], n_atoms) )
desc = 'N'
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
n_res += 1
elif desc == 'C-Alpha':
if last_nos['N']!=0 and last_chno['N']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 1, last_nos['N'], n_atoms) )
if cg and cg_bonds:
if last_nos['C-Alpha']!=0 and last_chno['C-Alpha']==chain_no:
n_bonds += 1
bonds.append( Bond(n_bonds, 1, last_nos['C-Alpha'], n_atoms) )
if last_nos['O']!=0 and last_chno['O']==chain_no:
n_bonds += 1
bonds.append( Bond(n_bonds, 3, last_nos['O'], n_atoms) )
atom_type = 1
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
group_id = 1
elif desc == 'C-Prime':
if last_nos['C-Alpha']!=0 and last_chno['C-Alpha']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 2, last_nos['C-Alpha'], n_atoms) )
atom_type = 1
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
elif desc == 'O':
if last_nos['C-Prime']!=0 and last_chno['C-Prime']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 6, last_nos['C-Prime'], n_atoms) )
if cg and cg_bonds:
if last_nos['C-Alpha']!=0 and last_chno['C-Alpha']==chain_no:
n_bonds += 1
bonds.append( Bond(n_bonds, 2, last_nos['C-Alpha'], n_atoms) )
atom_type = 3
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
group_id = 3
elif desc == 'C-Beta':
if last_nos['C-Alpha']!=0 and (not cg or cg_bonds):
n_bonds += 1
bonds.append( Bond(n_bonds, 4, last_nos['C-Alpha'], n_atoms) )
atom_type = 4
group_id = 2
elif desc == 'H-Beta':
if last_nos['C-Alpha']!=0 and (not cg or cg_bonds):
n_bonds += 1
bonds.append( Bond(n_bonds, 5, last_nos['C-Alpha'], n_atoms) )
atom_type = 5
group_id = 2
elif desc == 'O-In-The-End':
if last_nos['C-Prime']!=0 and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 7, last_nos['C-Prime'], n_atoms) )
atom_type = 3
if not go:
if desc == 'C-Beta' or desc == 'H-Beta' or desc == 'C-Alpha' or desc == 'O':
# n_atoms += 1
atoms.append( Atom(n_atoms, chain_no, n_res, atom_type, 0.0, float(l[3]), float(l[4]), float(l[5])) )
groups[group_id - 1].append(str(n_atoms))
else:
if desc == 'C-Alpha':
atom_type = 1
n_res += 1
atoms.append( Atom(n_atoms, chain_no, n_res, atom_type, 0.0, float(l[3]), float(l[4]), float(l[5])) )
groups[group_id - 1].append(str(n_atoms))
inp.close()
if go:
n_atoms = len(atoms)
n_bonds = 0
n_bond_types = 0
n_atom_types = 1
masses = [118.0]
space11 = " "
out = open(out_file,'w')
out.write("LAMMPS protain data file\n\n")
out.write( (space11+str(n_atoms))[-12:] + " atoms\n" )
out.write( (space11+str(n_bonds))[-12:] + " bonds\n" )
out.write( space11 + "0 angles\n" )
out.write( space11 + "0 dihedrals\n" )
out.write( space11 + "0 impropers\n\n" )
out.write( (space11+str(n_atom_types))[-12:] + " atom types\n" )
out.write( (space11+str(n_bond_types))[-12:] + " bond types\n" )
out.write( space11 + "0 angle types\n" )
out.write( space11 + "0 dihedral types\n" )
out.write( space11 + "0 improper types\n\n" )
out.write ( "\t".join([ str(xlo), str(xhi), "xlo xhi\n" ]) )
out.write ( "\t".join([ str(ylo), str(yhi), "ylo yhi\n" ]) )
out.write ( "\t".join([ str(zlo), str(zhi), "zlo zhi\n\n" ]) )
out.write( "Masses\n\n" )
for i in range(0, len(masses)):
out.write( (space11+str(i+1))[-12:] + "\t" + str(masses[i]) + "\n" )
out.write( "\n" )
out.write( "Atoms\n\n" )
for iAtom in atoms:
iAtom.write_(out)
out.write( "\n" )
if cg and cg_bonds and not go:
out.write( "Bond Coeffs\n\n" )
#out.write( space11 + "1\t20\t3.77\n" )
#out.write( space11 + "2\t20\t2.41\n" )
#out.write( space11 + "3\t20\t2.50\n" )
#out.write( space11 + "4\t20\t1.54\n" )
#out.write( space11 + "5\t20\t1.54\n" )
out.write( space11 + "1\t60\t3.816\n" )
out.write( space11 + "2\t60\t2.40\n" )
out.write( space11 + "3\t60\t2.76\n" )
out.write( space11 + "4\t60\t1.53\n" )
out.write( space11 + "5\t60\t1.09\n" )
if (cg_bonds or not cg) and not go:
out.write( "Bonds\n\n" )
for iBond in bonds:
iBond.write_(out)
out.write( "\n" )
out.close()
groups_string = ""
for igroup in groups:
groups_string += "group\t\t" + " ".join(igroup) + "\n\n"
bonds_string = ""
if cg and cg_bonds and not go:
bonds_string = "bond_style harmonic"
pair_string = ""
if cg and not go:
pair_string = "pair_style vexcluded 2 3.5 3.5"
pair_coeff_string = ""
if cg and not go:
pair_coeff_string = "pair_coeff * * 0.0\n"
pair_coeff_string += "pair_coeff 1 1 20.0 3.5 4.5\n"
pair_coeff_string += "pair_coeff 1 4 20.0 3.5 4.5\n"
pair_coeff_string += "pair_coeff 4 4 20.0 3.5 4.5\n"
pair_coeff_string += "pair_coeff 3 3 20.0 3.5 3.5\n"
replace_rules = [ ["``read_data_file", "read_data " + out_file],
["``groups", groups_string],
["``bonds", bonds_string],
["``main_fix", fix_string],
["``pair_interactions", pair_string],
["``pair_coeff", pair_coeff_string] ]
myhome = os.environ.get("HOME")
inp = open(myhome + "/opt/energy_inFilePattern.data")
inFile = inp.read()
inp.close()
for ir in replace_rules:
inFile = inFile.replace(ir[0], ir[1])
out = open(lammps_out_file,'w')
out.write(inFile)
out.close()
#out = open(groups_out_file,'w')
#for igroup in groups:
# out.write( "group\t\t" )
# out.write( " ".join(igroup) )
# out.write( "\n\n" )
#out.close()
|
|
import time
#import select
import re
import numpy  # used by the thermistor/RTD conversions in Keithley_2000 below
from ophyd import Device
#class SerialDevice():
# def __init__(self, prefix='', *args, read_attrs=None, configuration_attrs=None,
# name='SerialDevice', parent=None, **kwargs):
#super().__init__(prefix=prefix, *args, read_attrs=read_attrs, configuration_attrs=configuration_attrs, name=name, parent=parent, **kwargs)
class Agilent_34970A(Device):
# Note: Command terminator is a newline character \n.
# Note: On serial cable, data transmit/receive pins (pins 2 and 3 on Dsub-9 connector) must be reversed.
# Settings as of 07/25/2017: Baud rate = 19200 bits/s, Stop bits = 1, Parity = None, Flow control = None
# Moxa port 9: socket = 10.11.130.53:4009
def __init__(self, prefix='', *args, read_attrs=None, configuration_attrs=None,
name='Agilent_34970A', parent=None, **kwargs):
super().__init__(prefix=prefix, *args, read_attrs=read_attrs, configuration_attrs=configuration_attrs, name=name, parent=parent, **kwargs)
#self.port_number = 9
#self.server_port = 4000 + self.port_number
self.connect_socket()
self.HP34901_channel = 100 # 20 channel multiplexer module card in slot 1
self.HP34907_channel = 300 # DIO/DAC card in slot 3
# Essential socket interaction
########################################
def connect_socket(self):
#self.server_address= '10.11.130.51'
self.server_address= '10.11.130.53' # Moxa inside Endstation hutch
#self.server_IP = '10.11.129.2'
self.port_number = 9
self.server_port = 4000 + self.port_number
import socket
#self.sock = socket.socket()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(self.server_address)
self.sock.connect((self.server_address, self.server_port))
self.sock.settimeout(0.5)
def disconnect_socket(self):
self.sock.close()
def send_socket(self, msg):
#self.sock.send(chr(13).encode('ascii', 'ignore')) # Carriage return
self.sock.send(msg.encode('ascii', 'ignore'))
#self.sock.send(msg.encode('utf-8'))
def send_get_reply(self, msg, verbosity=3):
#self.send_socket('\r')
self.send_socket(msg)
time.sleep(0.5)
return self.read_socket(verbosity=verbosity)
def read_socket(self, timeout_s=3, verbosity=3):
start_time = time.time()
terminator = chr(0x18)
# Look for the response
amount_received = 0
amount_cutoff = 5000
txt = ''
msg_received = ''
while terminator not in txt and time.time()-start_time<timeout_s and amount_received<amount_cutoff:
try:
data = self.sock.recv(1)
except:
break
amount_received += len(data)
txt = data.decode('ascii')
msg_received += txt
msg_received = msg_received.replace(terminator, '')
if time.time()-start_time>timeout_s:
if verbosity>=1:
print('Read timeout after {:.1f} s.'.format(time.time()-start_time))
return ''
else:
if verbosity>=2:
print(msg_received)
return msg_received
# Commands for Agilent 34970A unit
########################################
# Reset Agilent 34970A unit to factory default settings.
def reset_Agilent34970A(self, verbosity = 3):
self.send_socket('*RST\n')
# Commands for HP34901 20-channel muliplexer module card
########################################
# Reset HP34901 to factory default settings.
def reset_HP34901(self, verbosity = 3):
self.send_socket('SYSTEM:CPON {chan}\n'.format(chan=self.HP34901_channel))
# Read DC voltage on specified channel.
def readDCV(self, channel, verbosity = 1):
if (channel < 1 or channel > 20):
print('Invalid multiplexer channel number; must be 1-20.\n')
return 0
read_channel = int(self.HP34901_channel + channel)
self.send_socket('INPUT:IMP:AUTO ON, (@{chan})\n'.format(chan=read_channel))
self.send_socket('SENSE:ZERO:AUTO ON, (@{chan})\n'.format(chan=read_channel))
self.send_socket('MEAS:VOLT:DC? AUTO,MAX, (@{chan})\n'.format(chan=read_channel))
dcv = float(self.read_socket(verbosity=1))
if (verbosity > 1):
print('Channel {chan} is {volts} VDC.\n'.format(chan=channel, volts=dcv))
return dcv
# Commands for HP34907 DIO/DAC card
########################################
# Output specified voltage on specified DAC channel
def setDAC(self, channel, voltage, verbosity = 1):
if (channel < 1 or channel > 2):
print('Invalid DAC channel number; must be 1 or 2.\n')
return 0
if (voltage < -12.0 or voltage > 12.0):
print('Invalid DAC voltage value; must be within +/-12 volts.\n')
return 0
dac_channel = int(self.HP34907_channel + channel + 3)
self.send_socket('SOURCE:VOLTAGE {volts}, (@{chan})\n'.format(volts=voltage, chan=dac_channel))
#self.send_socket('SOURCE:VOLTAGE {volts}, (@{chan})\r'.format(volts=voltage, chan=dac_channel))
if (verbosity > 1):
print('DAC output channel {chan} set to {volts} VDC.\n'.format(chan=channel, volts=voltage))
return 1
# Query voltage setting on specified DAC channel
def readDAC(self, channel, verbosity = 1):
if (channel < 1 or channel > 2):
print('Invalid DAC channel number; must be 1 or 2.\n')
return 0
dac_channel = int(self.HP34907_channel + channel + 3)
self.send_socket('SOURCE:VOLTAGE? (@{chan})\n'.format(chan=dac_channel))
voltage = float(self.read_socket(verbosity=1))
if (verbosity > 1):
print('DAC output channel {chan} set to {volts} VDC.\n'.format(chan=channel, volts=voltage))
return voltage
# Write digital byte to specified DIO channel
def writeByteDIO(self, channel, value, verbosity = 1):
if (channel < 1 or channel > 2):
print('Invalid DIO channel number; must be 1 or 2.\n')
return 0
dio_channel = int(self.HP34907_channel + channel)
diovalue = ((value ^ 0xf) & 0xf)
#self.send_socket('SOURCE:DIGITAL:DATA:BYTE {byte}, (@{chan})\n'.format(byte=diovalue, chan=dio_channel))
self.send_socket('SOURCE:DIGITAL:DATA:BYTE {byte}, (@{chan})\n'.format(byte=value, chan=dio_channel))
if (verbosity > 1):
print('DIO output channel {chan} set to {val}.\n'.format(chan=channel, val=value))
return 1
# Read digital byte on specified DIO channel
def readByteDIO(self, channel, verbosity = 1):
if (channel < 1 or channel > 2):
print('Invalid DIO channel number; must be 1 or 2.\n')
return 0
dio_channel = int(self.HP34907_channel + channel)
self.send_socket('SOURCE:DIGITAL:DATA:BYTE? (@{chan})\n'.format(chan=dio_channel))
value = int(self.read_socket(verbosity=1))
diovalue = ((value ^ 0xf) & 0xf)
if (verbosity > 1):
print('DIO output channel {chan} set to {val}.\n'.format(chan=channel, val=value))
return value
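# Usage sketch (illustrative addition; requires the actual Moxa/Agilent hardware,
# so it is left commented out):
#
#   agilent = Agilent_34970A(name='agilent')   # connects the socket on construction
#   dcv = agilent.readDCV(3, verbosity=2)      # DC volts on multiplexer channel 3
#   agilent.setDAC(1, 2.5, verbosity=2)        # drive DAC channel 1 to 2.5 V
#   agilent.disconnect_socket()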
class Keithley_2000(Device):
# Note: Command terminator is a carriage-return character \r.
# Settings as of 07/25/2017: Baud rate = 19200 bits/s, Stop bits = 1, Parity = None, Flow control = None
# Moxa port 10: socket = 10.11.130.53:4010
def __init__(self, prefix='', *args, read_attrs=None, configuration_attrs=None,
name='Keithley_2000', parent=None, **kwargs):
super().__init__(prefix=prefix, *args, read_attrs=read_attrs, configuration_attrs=configuration_attrs, name=name, parent=parent, **kwargs)
#self.port_number = 10
#self.server_port = 4000 + self.port_number
self.connect_socket()
# Essential socket interaction
########################################
def connect_socket(self):
#self.server_address= '10.11.130.51'
self.server_address= '10.11.130.53' # Moxa inside Endstation hutch
#self.server_IP = '10.11.129.2'
self.port_number = 10
self.server_port = 4000 + self.port_number
import socket
#self.sock = socket.socket()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(self.server_address)
self.sock.connect((self.server_address, self.server_port))
self.sock.settimeout(0.5)
def disconnect_socket(self):
self.sock.close()
def send_socket(self, msg):
#self.sock.send(chr(13).encode('ascii', 'ignore')) # Carriage return
self.sock.send(msg.encode('ascii', 'ignore'))
#self.sock.send(msg.encode('utf-8'))
def send_get_reply(self, msg, verbosity=3):
#self.send_socket('\r')
self.send_socket(msg)
time.sleep(0.5)
return self.read_socket(verbosity=verbosity)
def read_socket(self, timeout_s=3, verbosity=3):
start_time = time.time()
terminator = chr(0x18)
# Look for the response
amount_received = 0
amount_cutoff = 5000
txt = ''
msg_received = ''
while terminator not in txt and time.time()-start_time<timeout_s and amount_received<amount_cutoff:
try:
data = self.sock.recv(1)
except:
break
amount_received += len(data)
txt = data.decode('ascii')
msg_received += txt
msg_received = msg_received.replace(terminator, '')
if time.time()-start_time>timeout_s:
if verbosity>=1:
print('Read timeout after {:.1f} s.'.format(time.time()-start_time))
return ''
else:
if verbosity>=2:
print(msg_received)
return msg_received
# Select the channel for reading
def selectChannel(self, channel, verbosity = 1):
if (channel < 1 or channel > 10):
print('Invalid channel number; must be between 1 and 10.\n')
return 0
self.send_socket(':ROUT:CLOS (@{chan})\r'.format(chan=channel))
if (verbosity > 1):
print('Keithley 2000 channel set to {chan}.\n'.format(chan=channel))
return 1
# Read resistance on the selected channel
def readOhm(self, channel, verbosity = 1):
self.selectChannel(channel, verbosity=1)
time.sleep(0.1)
self.send_socket(':SENS:FUNC \'RES\'\r')
time.sleep(0.1)
self.send_socket(':SENS:DATA?\r')
time.sleep(0.1)
ohm = float(self.read_socket(verbosity=1))
if (verbosity > 1):
print('The resistance on channel {chan} is {res} Ohm.\n'.format(chan=channel, res=ohm))
return ohm
# Read DC voltage on the selected channel
def readDCV(self, channel, verbosity = 1):
self.selectChannel(channel, verbosity=1)
time.sleep(0.1)
self.send_socket(':SENS:FUNC \'VOLT:DC\'\r')
time.sleep(0.1)
self.send_socket(':SENS:DATA?\r')
time.sleep(0.1)
dcv = float(self.read_socket(verbosity=1))
if (verbosity > 1):
print('The DC voltage on channel {chan} is {volts} VDC.\n'.format(chan=channel, volts=dcv))
return dcv
# Read 30 kOhm thermistor on the selected channel and return T[degC]
def readThermister30kohm(self, channel, verbosity = 1):
ohm = self.readOhm(channel, verbosity=1)
coeff_a = 0.000932681
coeff_b = 0.000221455
coeff_c = 0.000000126
Temp = coeff_a
Temp += coeff_b * numpy.log(ohm)
Temp += coeff_c * (numpy.log(ohm))**3
Temp = 1.0/Temp - 273.15
if (verbosity > 1):
print('The temperature (30k-ohm thermistor) on channel {chan} is {degC} degC.\n'.format(chan=channel, degC=Temp))
return Temp
# Read 100 kOhm thermistor on the selected channel and return T[degC]
def readThermister100kohm(self, channel, verbosity = 1):
ohm = self.readOhm(channel, verbosity=1)
coeff_a = 0.000827094
coeff_b = 0.000204256
coeff_c = 1.15042e-07
Temp = coeff_a
Temp += coeff_b * numpy.log(ohm)
Temp += coeff_c * (numpy.log(ohm))**3
Temp = 1.0/Temp - 273.15
if (verbosity > 1):
print('The temperature (100k-ohm thermistor) on channel {chan} is {degC} degC.\n'.format(chan=channel, degC=Temp))
return Temp
# Read Pt100 RTD on the selected channel and return T[degC]
def readPt100(self, channel, verbosity = 1):
ohm = self.readOhm(channel, verbosity=1)
# Conversion formula from:
# http://www.mosaic-industries.com/embedded-systems/microcontroller-projects/temperature-measurement/platinum-rtd-sensors/resistance-calibration-table
c0 = -245.19
c1 = 2.5293
c2 = -0.066046
c3 = 4.0422e-3
c4 = -2.0697e-6
c5 = -0.025422
c6 = 1.6883e-3
c7 = -1.3601e-6
Temp = ohm * (c1 + ohm * (c2 + ohm * (c3 + c4 * ohm)))
Temp /= 1.0 + ohm * (c5 + ohm * (c6 + c7 * ohm))
Temp += c0
if (verbosity > 1):
print('The temperature (Pt100 RTD) on channel {chan} is {degC} degC.\n'.format(chan=channel, degC=Temp))
return Temp
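# Usage sketch (illustrative addition; requires the Keithley 2000 on the Moxa port,
# so it is left commented out):
#
#   keithley = Keithley_2000(name='keithley')
#   ohm = keithley.readOhm(1, verbosity=2)        # resistance on channel 1
#   degC = keithley.readPt100(1, verbosity=2)     # same channel read as a Pt100 RTD
#   keithley.disconnect_socket()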
class TTL_control(object):
'''
Uses the 2 8-bit DIO channels on Agilent34970A
Note: agilent = Agilent_34970A(), unit = Agilent DIO channel number, port 1 = bit 1, etc.
Note: If there is an error reading or setting, try to read/write to Agilent DIO channels directly first, and it should start working.
'''
def __init__(self, name='TTL_control', description="", pv=None, **args):
self.name=name
self.description=description
def readPort(self, unit, port, verbosity=2):
if (unit < 1 or unit > 2):
print('Invalid TTL unit number; must be 1 or 2.\n')
return 0
if (port < 1 or port > 8):
print('Invalid TTL port number; must be between 1 and 8.\n')
return 0
value = agilent.readByteDIO(unit, verbosity=1)
bit_pos = int(port)
onoff = int(bin(value)[2:].zfill(8)[-bit_pos])
if (verbosity > 1):
print('TTL unit {uu} port {pp} is currently set to {oo}.\n'.format(uu=unit, pp=bit_pos, oo=onoff))
return onoff
def readPorts(self, unit, verbosity=2):
if (unit < 1 or unit > 2):
print('Invalid TTL unit number; must be 1 or 2.\n')
return 0
value = agilent.readByteDIO(unit, verbosity=1)
bits = []
for i in range(1,8+1):
#b = self.readPort(unit, i, verbosity=verbosity)
b = int(bin(value)[2:].zfill(8)[-i])
bits.append(b)
if (verbosity > 1):
print('TTL unit {uu} ports 1-8 are currently set to {ll}.\n'.format(uu=unit, ll=bits))
return value
def setPort(self, unit, port, onoff, verbosity=2):
if (unit < 1 or unit > 2):
print('Invalid TTL unit number; must be 1 or 2.\n')
return 0
if (port < 1 or port > 8):
print('Invalid TTL port number; must be between 1 and 8.\n')
return 0
# check the current setting and don't do anything if already set as requested
b = self.readPort(unit, port, verbosity=1)
if (onoff == b):
if (verbosity > 1):
print('TTL unit {uu} port {pp} is already set to {oo}.\n'.format(uu=unit, pp=port, oo=onoff))
return 0
value = agilent.readByteDIO(unit, verbosity=1)
bit_pos = int(port)
if (onoff == 1):
value += 2**(bit_pos-1)
elif (onoff == 0):
value -= 2**(bit_pos-1)
else:
pass
agilent.writeByteDIO(unit, value, verbosity=1)
b_new = self.readPort(unit, port, verbosity=1)
if (b_new != onoff):
print('ERROR: TTL unit {uu} port {pp} is still set to {oo}.\n'.format(uu=unit, pp=port, oo=b_new))
return 0
else:
if (verbosity > 1):
print('TTL unit {uu} port {pp} has been set to {oo}.\n'.format(uu=unit, pp=port, oo=b_new))
return 1
def setPortOn(self, unit, port, verbosity=2):
return self.setPort(unit, port, 1, verbosity=verbosity)
def setPortOff(self, unit, port, verbosity=2):
return self.setPort(unit, port, 0, verbosity=verbosity)
#agilent = Agilent_34970A()
#keithley = Keithley_2000()
#ttl = TTL_control()
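# Usage sketch (illustrative addition): TTL_control assumes a module-level `agilent`
# instance like the one commented out above, so that the DIO byte can be read and
# single bits flipped:
#
#   agilent = Agilent_34970A(name='agilent')
#   ttl = TTL_control()
#   ttl.readPorts(1)          # print the 8 bits of DIO channel 1
#   ttl.setPortOn(1, 3)       # raise bit 3 of DIO channel 1
#   ttl.setPortOff(1, 3)      # and lower it again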
|
|
"""Test the Z-Wave JS lock platform."""
from zwave_js_server.const.command_class.lock import ATTR_CODE_SLOT, ATTR_USERCODE
from zwave_js_server.event import Event
from zwave_js_server.model.node import NodeStatus
from homeassistant.components.lock import (
DOMAIN as LOCK_DOMAIN,
SERVICE_LOCK,
SERVICE_UNLOCK,
)
from homeassistant.components.zwave_js.const import DOMAIN as ZWAVE_JS_DOMAIN
from homeassistant.components.zwave_js.lock import (
SERVICE_CLEAR_LOCK_USERCODE,
SERVICE_SET_LOCK_USERCODE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
STATE_LOCKED,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
)
from .common import SCHLAGE_BE469_LOCK_ENTITY
async def test_door_lock(hass, client, lock_schlage_be469, integration):
"""Test a lock entity with door lock command class."""
node = lock_schlage_be469
state = hass.states.get(SCHLAGE_BE469_LOCK_ENTITY)
assert state
assert state.state == STATE_UNLOCKED
# Test locking
await hass.services.async_call(
LOCK_DOMAIN,
SERVICE_LOCK,
{ATTR_ENTITY_ID: SCHLAGE_BE469_LOCK_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args[0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 20
assert args["valueId"] == {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "targetMode",
"propertyName": "targetMode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Target lock mode",
"states": {
"0": "Unsecured",
"1": "UnsecuredWithTimeout",
"16": "InsideUnsecured",
"17": "InsideUnsecuredWithTimeout",
"32": "OutsideUnsecured",
"33": "OutsideUnsecuredWithTimeout",
"254": "Unknown",
"255": "Secured",
},
},
}
assert args["value"] == 255
client.async_send_command.reset_mock()
# Test locked update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 20,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "currentMode",
"newValue": 255,
"prevValue": 0,
"propertyName": "currentMode",
},
},
)
node.receive_event(event)
assert hass.states.get(SCHLAGE_BE469_LOCK_ENTITY).state == STATE_LOCKED
client.async_send_command.reset_mock()
# Test unlocking
await hass.services.async_call(
LOCK_DOMAIN,
SERVICE_UNLOCK,
{ATTR_ENTITY_ID: SCHLAGE_BE469_LOCK_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args[0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 20
assert args["valueId"] == {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "targetMode",
"propertyName": "targetMode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Target lock mode",
"states": {
"0": "Unsecured",
"1": "UnsecuredWithTimeout",
"16": "InsideUnsecured",
"17": "InsideUnsecuredWithTimeout",
"32": "OutsideUnsecured",
"33": "OutsideUnsecuredWithTimeout",
"254": "Unknown",
"255": "Secured",
},
},
}
assert args["value"] == 0
client.async_send_command.reset_mock()
# Test set usercode service
await hass.services.async_call(
ZWAVE_JS_DOMAIN,
SERVICE_SET_LOCK_USERCODE,
{
ATTR_ENTITY_ID: SCHLAGE_BE469_LOCK_ENTITY,
ATTR_CODE_SLOT: 1,
ATTR_USERCODE: "1234",
},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args[0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 20
assert args["valueId"] == {
"commandClassName": "User Code",
"commandClass": 99,
"endpoint": 0,
"property": "userCode",
"propertyName": "userCode",
"propertyKey": 1,
"propertyKeyName": "1",
"metadata": {
"type": "string",
"readable": True,
"writeable": True,
"minLength": 4,
"maxLength": 10,
"label": "User Code (1)",
},
"value": "**********",
}
assert args["value"] == "1234"
client.async_send_command.reset_mock()
# Test clear usercode
await hass.services.async_call(
ZWAVE_JS_DOMAIN,
SERVICE_CLEAR_LOCK_USERCODE,
{ATTR_ENTITY_ID: SCHLAGE_BE469_LOCK_ENTITY, ATTR_CODE_SLOT: 1},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args[0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 20
assert args["valueId"] == {
"commandClassName": "User Code",
"commandClass": 99,
"endpoint": 0,
"property": "userIdStatus",
"propertyName": "userIdStatus",
"propertyKey": 1,
"propertyKeyName": "1",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"label": "User ID status (1)",
"states": {
"0": "Available",
"1": "Enabled",
"2": "Disabled",
},
},
"value": 1,
}
assert args["value"] == 0
event = Event(
type="dead",
data={
"source": "node",
"event": "dead",
"nodeId": 20,
},
)
node.receive_event(event)
assert node.status == NodeStatus.DEAD
assert hass.states.get(SCHLAGE_BE469_LOCK_ENTITY).state == STATE_UNAVAILABLE
|
|
""" Extract Patches
This script reads in a shapefile and a raster image and extracts patches to be used for machine learning algorithms.
If the shapefile consists of points, then a rectangular patch centered at each point is extracted. If the shapefile
consists of lines, then the first and second point are used to determine an orientation, and a patch is extracted
centered at the midpoint of the line and rotated so that the line would be horizontal, pointing in the +x direction.
The output folder will always contain a CSV file with the shapefile and raster name, as well as the index of the feature in
the shapefile, the latitude and longitude, and the image row and column of each feature.
Usage:
extract_patches.py (-i <shapefile>)... -r <raster> [--odir <outdir>] [options]
extract_patches.py -h
Options:
-i <shapefile> A shapefile with features indicated. Wildcards are allowed.
-r <raster> A raster source image to extract features from. No wildcards -- if you have multiple
raster files consider using `gdalbuildvrt` first to create a virtual mosaic.
--odir <outdir> The output folder. [default: ./patches/]
--otif Output TIF files [default]
--ojpg Output JPG files
--size <width>,<height> The size of each extracted patch, in pixels. Please make sure there are no space characters
between the arguments unless you put it in quotes! [default: 64,64]
--scale <scale> The amount to scale each image by before extracting the patches (e.g. if the raster is
in a higher resolution you may choose to scale it down). [default: 1.0]
--csv <csvfile> Create a CSV file with the center lat/lon, center pixel xy, angle, and the patch filename
as well as the name of the shapefile source and the raster source image.
--noprogress Suppress progress updates.
--vclip <min,max> Clip input values to `min` and `max`. Mainly useful when floating point output is not an
option (e.g. JPEG output).
--vstretch <min,max> Stretch output values to 'min' and 'max', after clipping. Mainly useful when floating point
output is not possible. Note that JPEG output must be in 0 to 1.
--debug Set the log level to DEBUG
--info Set the log level to INFO
--logfile <filename> Log to a file [default: '']
--psource <index> Index of the source point used to determine the object position and rotation. [default: 0]
--ptarget <index> Index of the target point used to determine the object position and rotation. [default: -1]
--pcenter <index> Index of the center point used to determine the object position. If this option is missing
we use the midpoint of the source and target points as the center
"""
import hashlib
import os
import numpy
import osgeo
import rasterio
from affine import Affine
from math import degrees, atan2, hypot, floor, ceil
from osgeo import ogr, gdalnumeric
from osgeo.osr import SpatialReference
from rasterio._io import RasterReader, RasterUpdater
# from skimage.transform import rescale
from scipy.ndimage import rotate, zoom
from skimage.util.dtype import img_as_ubyte
BREAKING_RELEASE = 0 # Big, rare, when you actually remove deprecated code.
NONBREAKING_RELEASE = 0 # Upload new binaries when you increment this, much less frequently than minor
MINOR_UPDATE = 1 # Increment for pullers
EXPERIMENTAL = 1
VERSION = '{}.{}.{}'.format(BREAKING_RELEASE, NONBREAKING_RELEASE, MINOR_UPDATE)
if EXPERIMENTAL:
VERSION += 'a{}'.format(EXPERIMENTAL)
__version__ = VERSION
from glob import glob
from docopt import docopt
import logging
from progressbar import Percentage, Bar, ETA, ProgressBar
def collect_filenames(wildcard_list):
logger = logging.getLogger(__name__)
results = []
for wildcard in wildcard_list:
matching_files = glob(wildcard)
if len(matching_files) == 0:
logger.warning("No files matching input specification '{}'".format(wildcard))
for filename in matching_files:
results.append(filename)
return results
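# Usage sketch (illustrative addition): collect_filenames() simply expands each
# wildcard with glob() and concatenates the matches, warning on patterns that match
# nothing. Filenames below are hypothetical:
#
#   collect_filenames(['fields/*.shp', 'extra/trees.shp'])
#   # -> ['fields/a.shp', 'fields/b.shp', 'extra/trees.shp']   (order follows glob)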
def main():
args = docopt(__doc__, version=VERSION)
logparams = {}
if args['--debug']:
logparams.update(level=logging.DEBUG)
elif args['--info']:
logparams.update(level=logging.INFO)
else:
logparams.update(level=logging.CRITICAL)
if args['--logfile'] != '':
logparams.update(filename=args['--logfile'])
logging.basicConfig(**logparams)
logger = logging.getLogger('extract_patches')
logger.debug('input \n {}'.format(args))
assert isinstance(logger, logging.Logger)
shapefiles = collect_filenames(args['-i'])
if len(shapefiles) == 0:
logger.error('No matching shapefiles for input `{}`'.format(args['-i']))
return
raster = args['-r']
try:
size = [int(x) for x in args['--size'].split(',')]
patch_width, patch_height = size
logger.debug("Set patch size to {} x {}".format(patch_width, patch_height))
except:
logger.error("Unable to parse option '--size'")
return
try:
scale = float(args['--scale'])
assert scale > 0
logger.debug("Set scale to {}".format(scale))
except:
logger.error("Unable to parse option '--scale'")
return
silent = args['--noprogress']
output_folder = args['--odir']
try:
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
logger.debug("Created output folder '{}'".format(output_folder))
else:
logger.debug("Found existing output folder '{}'".format(output_folder))
except:
logger.error("Unable to find or create output directory `{}`".format(output_folder))
return
if args['--ojpg']:
fmt = '.jpg'
else: # args['--otif'] (default)
fmt = '.tif'
logger.debug("Output format set to {}".format(fmt))
clip = args['--vclip'] is not None
if clip:
clipmin, clipmax = [float(x) for x in args['--vclip'].split(',')]
logger.debug("Clipping output to [{}, {}]".format(clipmin, clipmax))
else:
clipmin, clipmax = 0, 1
logger.debug("Not clipping output -- assuming range of value is [{},{}]".format(clipmin, clipmax))
stretch = args['--vstretch'] is not None
if stretch:
stretchmin, stretchmax = [float(x) for x in args['--vstretch'].split(',')]
logger.debug("Output value range will be stretched to [{},{}]".format(stretchmin, stretchmax))
else:
logger.debug("Output values will not be stretched")
psource = int(args["--psource"])
ptarget = int(args["--ptarget"])
use_centroid = True
pcenter = 0
if args["--pcenter"] is not None:
use_centroid = False
pcenter = args["--pcenter"]
logger.debug('Orientation will be determined using point {}, and point {}'.format(psource, ptarget))
if use_centroid:
logger.debug('The center will be determined using the average of psource and ptarget')
else:
logger.debug('The center will be determined using the point {}'.format(pcenter))
if args['--csv']:
csv_file_name = args['--csv']
if os.path.isfile(csv_file_name):
logger.error("CSV File already exists; please remove or rename it first.")
return
logger.debug("Writing to CSV File '{}'".format(csv_file_name))
else:
csv_file_name = None
logger.debug("No CSV output")
# Estimate number of shape features
count = 0
if not silent:
pbar = ProgressBar(len(shapefiles), ['Counting Features:', Percentage(), ' ', Bar(), ' ', ETA()])
pbar.start()
for i, s in enumerate(shapefiles):
vector = ogr.Open(s)
layer = vector.GetLayer()
count += layer.GetFeatureCount()
if not silent:
pbar.update(i)
if not silent:
pbar.finish()
logger.debug("Counted {} features in {} shapefiles".format(count, len(shapefiles)))
# Write header for CSV file
if csv_file_name is not None:
with open(os.path.join(output_folder, csv_file_name), 'w') as csvf:
csvf.write('gx, gy, r1, r2, theta, patch_width, patch_height, image_name\n')
with rasterio.open(raster) as rf:
assert isinstance(rf, RasterReader)
srs = SpatialReference(str(rf.crs.wkt))
affine = rf.affine
geo_to_pixels = ~affine
logging.debug("Output CRS will be '''{}'''".format(srs.ExportToPrettyWkt()))
if not silent:
pbar = ProgressBar(count, ['Exporting Patches:', Percentage(), ' ', Bar(), ' ', ETA()])
pbar.start()
for sf in shapefiles:
logger.info("Processing input '{}'".format(sf))
vector = ogr.Open(sf)
assert isinstance(vector, ogr.DataSource)
layer = vector.GetLayer()
assert isinstance(layer, ogr.Layer)
if not srs.IsSame(layer.GetSpatialRef()):
logger.warning("Coordinate system mismatch (its ok, I will reproject)")
for f in layer:
if not silent:
pbar.update(pbar.currval + 1)
geom = f.GetGeometryRef()
if geom is None:
logging.warn("Missing Geometry for featur {}".format(f.GetFID()))
continue
assert isinstance(geom, ogr.Geometry)
geom.TransformTo(srs)
geo_points = geom.GetPoints()
# The center and direction are determined based on two points.
# I am using the first and last.
source = geo_points[psource]
target = geo_points[ptarget]
# First the center
sx, sy = geo_to_pixels * source # <- this converts from map coordinates to pixel indices
tx, ty = geo_to_pixels * target
if use_centroid:
cx, cy = (sx + tx) / 2, (sy + ty) / 2
else:
# For trees, we mark three points. In that case, I want the middle point to be considered the
# center
cx, cy = geo_to_pixels * geo_points[pcenter]
# Now the direction
dx, dy = (tx - sx), (ty - sy)
theta = degrees(atan2(dy, dx)) # In PIXELS, CCW from +x. Not necessarily CCW from E (or CW from N)
# We also determine the scale (in pixels) as a radius.
# For trees, there are two radii because we want the image to be big enough to fit the shadow
# and also the canopy, but we want it centered on the tree.
r1 = hypot(tx - cx, ty - cy)
r2 = hypot(cx - sx, cy - sy)
r1, r2 = max(r1, r2), min(r1, r2) # For 3 points, we assume two radii. Else these are duplicates.
# When we write coordinates back out, they should be in map coordinates.
gx, gy = affine * (cx, cy) # Geographic coordinates (e.g. lat lon) of the center.
# We read a square slightly larger than the scaled version of our patch, so that
# we can safely rotate the raster without missing pixels in the corners.
box_radius = hypot(patch_width, patch_height) / (2.0 * scale)
x0, x1 = int(floor(cx - box_radius)), int(ceil(cx + box_radius))
y0, y1 = int(floor(cy - box_radius)), int(ceil(cy + box_radius))
## Now we save the image patch, rotated.
# When we save the image, we need to specify the Affine transform that positions it properly in a map.
# Otherwise the image would not render in the right position if we load it into something like QGIS.
patch_affine = (affine *
Affine.translation(cx, cy) *
Affine.rotation(angle=-theta) *
Affine.translation(-patch_width / 2., -patch_height / 2.))
# Determine the file metadata
kwargs = rf.meta
kwargs.update(transform=patch_affine, width=patch_width, height=patch_height)
if fmt == '.tif':
kwargs.update(driver='GTiff', compress='lzw', dtype=numpy.float32)
elif fmt == '.jpg':
kwargs.update(driver='JPEG', quality=90, dtype=numpy.uint8)
# Name patches based on their georeferenced position in the map
name = '{}E-{}N-{}x{}'.format(str(gx).replace('.', '_'), str(gy).replace('.', '_'),
patch_width, patch_height)
image_name = os.path.join(output_folder, name + fmt)
box_radius *= scale
if csv_file_name is not None:
with open(os.path.join(output_folder, csv_file_name), 'a+') as csvf:
fields = gx, gy, r1, r2, theta, patch_width, patch_height, image_name
csvf.write(','.join([str(_) for _ in fields]) + '\n')
with rasterio.open(image_name, 'w', **kwargs) as pf:
assert isinstance(pf, RasterUpdater)
for band in range(rf.count):
patch = rf.read(band + 1, window=((y0, y1), (x0, x1)), boundless=True, )
patch = patch.astype(numpy.float32)
# The patch is a square centered on the object.
# We want to rotate it, scale it, and crop it to fit the object.
patch_rotated = rotate(patch, theta, reshape=False)
patch_scaled = zoom(patch_rotated, scale)
i0 = int(round(box_radius - patch_height / 2.))
j0 = int(round(box_radius - patch_width / 2.))
i1 = i0 + patch_height
j1 = j0 + patch_width
patch_cropped = patch_scaled[i0:i1, j0:j1]
# Sometime we want to limit the range of output values (e.g. 0..255)
if clip:
patch_cropped = numpy.clip(patch_cropped, clipmin, clipmax)
else:
clipmin = numpy.percentile(patch_cropped.flat, 5)
clipmax = numpy.percentile(patch_cropped.flat, 95)
# Sometimes we want to stretch the range of output values (e.g. scale it to fit in 0..255)
if stretch:
patch_cropped = (patch_cropped - clipmin) / (clipmax - clipmin)
patch_cropped = patch_cropped * (stretchmax - stretchmin) + stretchmin
patch_cropped = patch_cropped.clip(stretchmin, stretchmax)
if fmt == '.jpg':
# JPEG does not support floating point output. All we can do is 8 bit
# (python has not 12bit array type)
patch_cropped = img_as_ubyte(patch_cropped.clip(-1, 1))
pf.write(patch_cropped, band + 1)
if not silent:
pbar.finish()
logger.debug("Finished.")
if __name__ == '__main__':
main()
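# Worked example (illustrative addition, assuming the `affine` package as imported
# above): the patch transform built inside main() maps patch pixel (col, row) to map
# coordinates by first shifting the patch so its centre sits at the feature centre,
# then undoing the rotation, then applying the raster's own affine:
#
#   affine = Affine.identity()                      # stand-in for rf.affine
#   patch_affine = (affine
#                   * Affine.translation(100, 200)  # feature centre in pixels (cx, cy)
#                   * Affine.rotation(angle=-30)    # undo a 30 degree object rotation
#                   * Affine.translation(-32, -32)) # centre a 64x64 patch
#   patch_affine * (32, 32)                         # patch centre -> (100.0, 200.0)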
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# hack to make sure -m transformer/generator works as expected
"""
Poly-encoder agent that ingests image features.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from typing import Any, Dict
import torch
from parlai.agents.image_seq2seq.modules import ContextWithImageEncoder
from parlai.agents.transformer.polyencoder import PolyencoderAgent, PolyEncoderModule
from parlai.core.torch_agent import Batch
from parlai.core.torch_image_agent import TorchImageAgent
from parlai.utils.misc import warn_once
class ImagePolyencoderAgent(PolyencoderAgent, TorchImageAgent):
"""
Poly-encoder Agent that ingests image features.
Agent that allows encoding image features and adding or concatenating them to the
context encoding.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
PolyencoderAgent.add_cmdline_args(parser, partial_opt=partial_opt)
TorchImageAgent.add_cmdline_args(parser, partial_opt=partial_opt)
agent = parser.add_argument_group('ImagePolyencoder Args')
agent.add_argument(
'--image-combination-mode',
type=str,
default='prepend',
choices=['add', 'append', 'prepend'],
help='How to combine image embedding (if used) with context embedding',
)
# TODO: more thoroughly test out whether one of these choices is best and add a
# 'recommended' arg here. 'add' and 'prepend' seem to be roughly similar in
# performance
agent.set_defaults(reduction_type=None)
# This agent doesn't support any encoder output reductions
return agent
def build_model(self, states=None):
"""
Return built model.
"""
return ImagePolyencoderModule(self.opt, self.dict, self.NULL_IDX)
def batchify_image_features(self, batch: Batch) -> Batch:
"""
Return the image features as a Tensor of the correct type.
Fill in missing feature vectors. Here, we require image features to be saved in
`batch` as a Tensor for passing through the image encoder. This is required for
data_parallel.
"""
# Checks/formatting of batch.image
bsz = self._get_batch_size(batch)
if batch.image is None or len(batch.image) == 0:
batch.image = [None] * bsz
else:
assert len(batch.image) == bsz
# Process all image feature vectors, or add in zero vectors if missing
processed_features_list = []
processed_zero_features = self._process_image_features(
torch.zeros((self.image_features_dim,))
)
for orig_features in batch.image:
if isinstance(orig_features, torch.Tensor):
processed_features_list.append(
self._process_image_features(orig_features)
)
else:
if orig_features is not None:
warn_once(
'Unsupported image feature format. Image features will be ignored!'
)
processed_features_list.append(processed_zero_features)
# Turn into batchsize x image_features_dim for DataParallel
batch.image = torch.stack(processed_features_list)
return batch
def _get_batch_size(self, batch) -> int:
"""
Return the size of the batch.
Use the size of the text vec if it exists; otherwise, use the length of the
image feature list.
"""
if batch.text_vec is not None:
return batch.text_vec.size(0)
else:
return len(batch.image)
def _model_context_input(self, batch) -> Dict[str, Any]:
"""
Override PolyencoderAgent's context inputs into the model.
"""
return {'ctxt_tokens': batch.text_vec, 'ctxt_image': batch.image}
def load_state_dict(self, state_dict):
"""
Override to account for weights used for image features.
"""
for tensor in ['dummy_image_enc', 'ones_mask']:
key = f'encoder_ctxt.{tensor}'
if hasattr(self.model.encoder_ctxt, tensor) and key not in state_dict:
state_dict[key] = getattr(self.model.encoder_ctxt, tensor)
if hasattr(self.model.encoder_ctxt, 'image_encoder'):
for layer_idx, layer in enumerate(self.model.encoder_ctxt.image_encoder):
for tensor in ['weight', 'bias']:
key = f'encoder_ctxt.image_encoder.{layer_idx}.{tensor}'
if hasattr(layer, tensor) and key not in state_dict:
state_dict[key] = getattr(layer, tensor)
super().load_state_dict(state_dict)
class ImagePolyencoderModule(PolyEncoderModule):
"""
Poly-encoder model with image features.
Model that allows encoding image features and adding or concatenating them to the
context encoding.
"""
def get_encoder(self, opt, dict_, null_idx, reduction_type, for_context: bool):
"""
Return encoder that allows for image features to be passed in, given options.
:param opt:
opt dict
:param dict:
dictionary agent
:param null_idx:
null/pad index into dict
:param reduction_type: only used for compatibility with the superclass method
:param for_context:
whether this is the context encoder (as opposed to the candidate encoder)
:return:
either a TransformerEncoder or a ContextWithImageEncoder, initialized
correctly
"""
if for_context:
if reduction_type is not None:
raise NotImplementedError('No encoder output reductions supported!')
embeddings = self._get_embeddings(
dict_=dict_, null_idx=null_idx, embedding_size=opt['embedding_size']
)
return ContextWithImageEncoder(
opt=opt,
vocabulary_size=len(dict_),
embedding=embeddings,
padding_idx=null_idx,
image_encoder_num_layers=opt['image_encoder_num_layers'],
image_features_dim=opt['image_features_dim'],
image_combination_mode=opt['image_combination_mode'],
n_image_tokens=opt['n_image_tokens'],
)
else:
# The candidate encoder is the same as for PolyEncoderModule
return super().get_encoder(
opt=opt,
dict_=dict_,
null_idx=null_idx,
reduction_type=reduction_type,
for_context=for_context,
)
def _context_encoder_input(self, ctxt_inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Override PolyEncoderModule's inputs into the context encoder.
"""
assert set(ctxt_inputs.keys()) == {'ctxt_tokens', 'ctxt_image'}
return {
'src_tokens': ctxt_inputs['ctxt_tokens'],
'image_features': ctxt_inputs['ctxt_image'],
}
def _get_context_batch_size(self, **ctxt_inputs: torch.Tensor) -> int:
"""
Return the batch size of the context.
"""
if ctxt_inputs['ctxt_tokens'] is not None:
return ctxt_inputs['ctxt_tokens'].size(0)
else:
return ctxt_inputs['ctxt_image'].size(0)
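# Illustrative note (added): the two hooks above translate between the agent-side and
# module-side names for the context inputs; a batch is carried through roughly as
#
#   ImagePolyencoderAgent._model_context_input(batch)
#       -> {'ctxt_tokens': batch.text_vec, 'ctxt_image': batch.image}
#   ImagePolyencoderModule._context_encoder_input(ctxt_inputs)
#       -> {'src_tokens': ..., 'image_features': ...}   # what ContextWithImageEncoder expects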
|
|
# Webhooks for external integrations.
from __future__ import absolute_import
import re
from functools import partial
from six.moves import zip
from typing import Any, Callable, Dict, List, Optional, Text
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import Client, UserProfile
from zerver.lib.webhooks.git import get_push_commits_event_message, SUBJECT_WITH_BRANCH_TEMPLATE,\
get_force_push_commits_event_message, get_remove_branch_event_message, get_pull_request_event_message,\
SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE, get_issue_event_message, get_commits_comment_action_message,\
get_push_tag_event_message
BITBUCKET_SUBJECT_TEMPLATE = '{repository_name}'
USER_PART = 'User {display_name}(login: {username})'
BITBUCKET_FORK_BODY = USER_PART + ' forked the repository into [{fork_name}]({fork_url}).'
BITBUCKET_COMMIT_STATUS_CHANGED_BODY = '[System {key}]({system_url}) changed status of {commit_info} to {status}.'
PULL_REQUEST_SUPPORTED_ACTIONS = [
'approved',
'unapproved',
'created',
'updated',
'rejected',
'fulfilled',
'comment_created',
'comment_updated',
'comment_deleted',
]
class UnknownTriggerType(Exception):
pass
@api_key_only_webhook_view('Bitbucket2')
@has_request_variables
def api_bitbucket2_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='bitbucket')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], str) -> HttpResponse
try:
type = get_type(request, payload)
if type != 'push':
subject = get_subject_based_on_type(payload, type)
body = get_body_based_on_type(type)(payload)
check_send_message(user_profile, client, 'stream', [stream], subject, body)
else:
subjects = get_push_subjects(payload)
bodies_list = get_push_bodies(payload)
for body, subject in zip(bodies_list, subjects):
check_send_message(user_profile, client, 'stream', [stream], subject, body)
except KeyError as e:
return json_error(_("Missing key {} in JSON").format(str(e)))
return json_success()
def get_subject_for_branch_specified_events(payload, branch_name=None):
# type: (Dict[str, Any], Optional[Text]) -> Text
return SUBJECT_WITH_BRANCH_TEMPLATE.format(
repo=get_repository_name(payload['repository']),
branch=get_branch_name_for_push_event(payload) if branch_name is None else branch_name
)
def get_push_subjects(payload):
# type: (Dict[str, Any]) -> List[str]
subjects_list = []
for change in payload['push']['changes']:
potential_tag = (change['new'] or change['old'] or {}).get('type')
if potential_tag == 'tag':
subjects_list.append(str(get_subject(payload)))
else:
if change.get('new'):
branch_name = change['new']['name']
else:
branch_name = change['old']['name']
subjects_list.append(str(get_subject_for_branch_specified_events(payload, branch_name)))
return subjects_list
def get_subject(payload):
# type: (Dict[str, Any]) -> str
return BITBUCKET_SUBJECT_TEMPLATE.format(repository_name=get_repository_name(payload['repository']))
def get_subject_based_on_type(payload, type):
# type: (Dict[str, Any], str) -> Text
if type.startswith('pull_request'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repository_name(payload.get('repository')),
type='PR',
id=payload['pullrequest']['id'],
title=payload['pullrequest']['title']
)
if type.startswith('issue'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repository_name(payload.get('repository')),
type='Issue',
id=payload['issue']['id'],
title=payload['issue']['title']
)
return get_subject(payload)
def get_type(request, payload):
# type: (HttpRequest, Dict[str, Any]) -> str
event_key = request.META.get("HTTP_X_EVENT_KEY")
if payload.get('push'):
return 'push'
elif payload.get('fork'):
return 'fork'
elif payload.get('comment') and payload.get('commit'):
return 'commit_comment'
elif payload.get('commit_status'):
return 'change_commit_status'
elif payload.get('issue'):
if payload.get('changes'):
return "issue_updated"
if payload.get('comment'):
return 'issue_commented'
return "issue_created"
elif payload.get('pullrequest'):
pull_request_template = 'pull_request_{}'
action = re.match('pullrequest:(?P<action>.*)$', event_key)
if action:
action = action.group('action')
if action in PULL_REQUEST_SUPPORTED_ACTIONS:
return pull_request_template.format(action)
raise UnknownTriggerType("We don't support {} event type".format(event_key))
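# Illustrative examples (added): get_type() keys off the payload body plus the
# X-Event-Key header; payload contents below are hypothetical sketches:
#
#   {'push': {...}, 'repository': {...}}                          -> 'push'
#   {'issue': {...}, 'comment': {...}}                            -> 'issue_commented'
#   {'pullrequest': {...}} with X-Event-Key 'pullrequest:approved' -> 'pull_request_approved'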
def get_body_based_on_type(type):
# type: (str) -> Any
return GET_SINGLE_MESSAGE_BODY_DEPENDING_ON_TYPE_MAPPER.get(type)
def get_push_bodies(payload):
# type: (Dict[str, Any]) -> List[Text]
messages_list = []
for change in payload['push']['changes']:
potential_tag = (change['new'] or change['old'] or {}).get('type')
if potential_tag == 'tag':
messages_list.append(get_push_tag_body(payload, change))
elif change.get('closed'):
messages_list.append(get_remove_branch_push_body(payload, change))
elif change.get('forced'):
messages_list.append(get_force_push_body(payload, change))
else:
messages_list.append(get_normal_push_body(payload, change))
return messages_list
def get_remove_branch_push_body(payload, change):
# type: (Dict[str, Any], Dict[str, Any]) -> Text
return get_remove_branch_event_message(
get_user_username(payload),
change['old']['name'],
)
def get_force_push_body(payload, change):
# type: (Dict[str, Any], Dict[str, Any]) -> Text
return get_force_push_commits_event_message(
get_user_username(payload),
change['links']['html']['href'],
change['new']['name'],
change['new']['target']['hash']
)
def get_normal_push_body(payload, change):
# type: (Dict[str, Any], Dict[str, Any]) -> Text
commits_data = [{
'sha': commit.get('hash'),
'url': commit.get('links').get('html').get('href'),
'message': commit.get('message'),
} for commit in change['commits']]
return get_push_commits_event_message(
get_user_username(payload),
change['links']['html']['href'],
change['new']['name'],
commits_data,
is_truncated=change['truncated']
)
def get_fork_body(payload):
# type: (Dict[str, Any]) -> str
return BITBUCKET_FORK_BODY.format(
display_name=get_user_display_name(payload),
username=get_user_username(payload),
fork_name=get_repository_full_name(payload['fork']),
fork_url=get_repository_url(payload['fork'])
)
def get_commit_comment_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('comment')
action = u'[commented]({})'.format(comment['links']['html']['href'])
return get_commits_comment_action_message(
get_user_username(payload),
action,
comment['commit']['links']['html']['href'],
comment['commit']['hash'],
comment['content']['raw'],
)
def get_commit_status_changed_body(payload):
# type: (Dict[str, Any]) -> str
commit_id = re.match('.*/commit/(?P<commit_id>[A-Za-z0-9]*$)', payload['commit_status']['links']['commit']['href'])
if commit_id:
commit_info = "{}/{}".format(get_repository_url(payload['repository']), commit_id.group('commit_id'))
else:
commit_info = 'commit'
return BITBUCKET_COMMIT_STATUS_CHANGED_BODY.format(
key=payload['commit_status']['key'],
system_url=payload['commit_status']['url'],
commit_info=commit_info,
status=payload['commit_status']['state']
)
def get_issue_commented_body(payload):
# type: (Dict[str, Any]) -> Text
action = '[commented]({}) on'.format(payload['comment']['links']['html']['href'])
return get_issue_action_body(payload, action)
def get_issue_action_body(payload, action):
# type: (Dict[str, Any], str) -> Text
issue = payload['issue']
assignee = None
message = None
if action == 'created':
if issue['assignee']:
assignee = issue['assignee'].get('username')
message = issue['content']['raw']
return get_issue_event_message(
get_user_username(payload),
action,
issue['links']['html']['href'],
issue['id'],
message,
assignee
)
def get_pull_request_action_body(payload, action):
# type: (Dict[str, Any], str) -> Text
pull_request = payload['pullrequest']
return get_pull_request_event_message(
get_user_username(payload),
action,
get_pull_request_url(pull_request),
pull_request.get('id')
)
def get_pull_request_created_or_updated_body(payload, action):
# type: (Dict[str, Any], str) -> Text
pull_request = payload['pullrequest']
assignee = None
if pull_request.get('reviewers'):
assignee = pull_request.get('reviewers')[0]['username']
return get_pull_request_event_message(
get_user_username(payload),
action,
get_pull_request_url(pull_request),
pull_request.get('id'),
target_branch=pull_request['source']['branch']['name'],
base_branch=pull_request['destination']['branch']['name'],
message=pull_request['description'],
assignee=assignee
)
def get_pull_request_comment_created_action_body(payload):
# type: (Dict[str, Any]) -> Text
action = '[commented]({})'.format(payload['comment']['links']['html']['href'])
return get_pull_request_comment_action_body(payload, action)
def get_pull_request_deleted_or_updated_comment_action_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
action = "{} a [comment]({})".format(action, payload['comment']['links']['html']['href'])
return get_pull_request_comment_action_body(payload, action)
def get_pull_request_comment_action_body(payload, action):
# type: (Dict[str, Any], str) -> Text
action += ' on'
return get_pull_request_event_message(
get_user_username(payload),
action,
payload['pullrequest']['links']['html']['href'],
payload['pullrequest']['id'],
message=payload['comment']['content']['raw']
)
def get_push_tag_body(payload, change):
# type: (Dict[str, Any], Dict[str, Any]) -> Text
if change.get('created'):
tag = change.get('new')
action = 'pushed'
elif change.get('closed'):
tag = change.get('old')
action = 'removed'
else:
tag = change.get('new')
action = None
return get_push_tag_event_message(
get_user_username(payload),
tag.get('name'),
tag_url=tag.get('links').get('html').get('href'),
action=action
)
def get_pull_request_title(pullrequest_payload):
# type: (Dict[str, Any]) -> str
return pullrequest_payload['title']
def get_pull_request_url(pullrequest_payload):
# type: (Dict[str, Any]) -> str
return pullrequest_payload['links']['html']['href']
def get_repository_url(repository_payload):
# type: (Dict[str, Any]) -> str
return repository_payload['links']['html']['href']
def get_repository_name(repository_payload):
# type: (Dict[str, Any]) -> str
return repository_payload['name']
def get_repository_full_name(repository_payload):
# type: (Dict[str, Any]) -> str
return repository_payload['full_name']
def get_user_display_name(payload):
# type: (Dict[str, Any]) -> str
return payload['actor']['display_name']
def get_user_username(payload):
# type: (Dict[str, Any]) -> str
return payload['actor']['username']
def get_branch_name_for_push_event(payload):
# type: (Dict[str, Any]) -> str
change = payload['push']['changes'][-1]
if change.get('new'):
return change['new']['name']
else:
return change['old']['name']
GET_SINGLE_MESSAGE_BODY_DEPENDING_ON_TYPE_MAPPER = {
'fork': get_fork_body,
'commit_comment': get_commit_comment_body,
'change_commit_status': get_commit_status_changed_body,
'issue_updated': partial(get_issue_action_body, action='updated'),
'issue_created': partial(get_issue_action_body, action='created'),
'issue_commented': get_issue_commented_body,
'pull_request_created': partial(get_pull_request_created_or_updated_body, action='created'),
'pull_request_updated': partial(get_pull_request_created_or_updated_body, action='updated'),
'pull_request_approved': partial(get_pull_request_action_body, action='approved'),
'pull_request_unapproved': partial(get_pull_request_action_body, action='unapproved'),
'pull_request_fulfilled': partial(get_pull_request_action_body, action='merged'),
'pull_request_rejected': partial(get_pull_request_action_body, action='rejected'),
'pull_request_comment_created': get_pull_request_comment_created_action_body,
'pull_request_comment_updated': partial(get_pull_request_deleted_or_updated_comment_action_body, action='updated'),
'pull_request_comment_deleted': partial(get_pull_request_deleted_or_updated_comment_action_body, action='deleted')
}
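# A minimal, self-contained sketch (illustrative only, not used by the
# webhook): the mapper above is a plain dispatch table, and functools.partial
# pre-binds the `action` argument so that every entry can be called with just
# the payload. The names below (_example_issue_body, _example_mapper) are
# hypothetical and exist only for this illustration.
def _example_issue_body(payload, action):
    # Stand-in for get_issue_action_body(): builds a tiny summary string.
    return '{} {} issue #{}'.format(payload['actor'], action, payload['id'])

_example_mapper = {
    'issue_created': partial(_example_issue_body, action='created'),
    'issue_updated': partial(_example_issue_body, action='updated'),
}

# _example_mapper['issue_created']({'actor': 'kim', 'id': 7})
# returns 'kim created issue #7'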
|
|
from __future__ import division, print_function
import imp
import os
import sys
import shutil
import pickle
import copy
import warnings
import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# XXX: ugly, but we use a class to avoid calling some expensive functions
# twice in config.h/numpyconfig.h. I don't see a better way because distutils
# forces config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
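# Illustrative sketch (not part of the build): the caching pattern used by
# CallOnceOnly above, in isolation. The first call stores a pickled copy of
# the result; later calls return an independent deep copy, so callers may
# mutate what they get back without corrupting the cache. `expensive_check`
# is a hypothetical stand-in for check_types/check_ieee_macros/check_complex.
class _CachedCheck(object):
    def __init__(self, expensive_check):
        self._func = expensive_check
        self._cached = None

    def __call__(self, *a, **kw):
        if self._cached is None:
            out = self._func(*a, **kw)
            self._cached = pickle.dumps(out)
        else:
            out = copy.deepcopy(pickle.loads(self._cached))
        return out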
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Perhaps a fancier check is in order here,
# so that threads are only enabled if there
# are actually multiple CPUs -- but
# threaded code can be nice even on a single
# CPU, so that long-running calculations don't
# block.
return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first; if that fails, test each function
# individually. Return success only if all of the functions are available.
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct.
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args = tup
else:
f, args, headers = tup[0], tup[1], [tup[2]]
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((fname2def(f), 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
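# Illustrative sketch (not part of the build): the strategy used by
# check_funcs() above -- try the whole batch in one compile, and only fall
# back to per-function probes when that fails -- written against plain
# predicates. `probe_all` and `probe_one` are hypothetical stand-ins for
# config.check_funcs_once() and config.check_func().
def _check_all_or_each(names, probe_all, probe_one, found):
    if probe_all(names):
        # One compile/link covered every function.
        found.extend([(name, 1) for name in names])
        return 1
    # Batch failed: probe one by one so the available subset is still recorded.
    for name in names:
        if probe_one(name):
            found.append((name, 1))
    return 0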
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365")
return priv, pub
except:
# os.uname is not available on all platforms; a blanket except is ugly but safe here.
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct.
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
# Normally, isnan and isinf are macros (C99), but some platforms only have
# the function, or both the function and the macro. Check for the macro
# only, and define replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers = ["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {}
expected['short'] = [2]
expected['int'] = [4]
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
expected['long double'] = [16, 12, 8]
expected['Py_intptr_t'] = [8, 4]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
expected['off_t'] = [8, 4]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "\
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers = ["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def,
expected=[2 * x for x in expected[type]])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"\
", please contact the maintainers")
return private_defines, public_defines
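# Illustrative sketch (not part of the build): the "expected sizes are only
# hints" idea described at the top of check_types(), in isolation. `probe` is
# a hypothetical stand-in for a compile-time sizeof check such as
# config_cmd.check_type_size(); hinted sizes are tried first and a linear
# scan is the fallback.
def _find_size(probe, hints, fallback=range(1, 33)):
    for candidate in list(hints) + list(fallback):
        if probe(candidate):
            return candidate
    return -1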
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
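# Illustrative sketch (not part of the build): the candidate-probing loop in
# check_mathlib() above relies on Python's for/else -- the else branch runs
# only when the loop finishes without a break, i.e. when no candidate library
# list could link `exp`. `links_exp` is a hypothetical stand-in for
# config_cmd.check_func("exp", ...).
def _pick_mathlib(candidates, links_exp):
    for libs in candidates:
        if links_exp(libs):
            chosen = libs
            break
    else:
        raise EnvironmentError("no candidate math library could link exp()")
    return chosen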
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info, default_lib_dirs
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = imp.load_module('_'.join(n.split('.')),
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform=='win32' or os.name=='nt':
win32_checks(moredefs)
# C99 restrict keyword
moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
# Inline check
inline = config_cmd.check_inline()
# Check whether we need our own wide character support
if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
PYTHON_HAS_UNICODE_WIDE = True
else:
PYTHON_HAS_UNICODE_WIDE = False
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
# Ugly: this can be called within a library and not an extension,
# in which case there is no libraries attribute (and none is
# needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# Put the private include directory in build_dir on the search path;
# this allows using code generation in headers.
config.add_include_dirs(join(build_dir, "src", "private"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
'include/numpy/fenv/fenv.c',
'include/numpy/fenv/fenv.h',
join(codegen_dir, 'genapi.py'),
]
# Don't install fenv unless we need it.
if sys.platform == 'cygwin':
config.add_data_dir('include/numpy/fenv')
#######################################################################
# dummy module #
#######################################################################
# npymath needs the config.h and numpyconfig.h files to be generated, but
# build_clib cannot handle generate_config_h and generate_numpyconfig_h
# (don't ask). Because clibs are generated before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources = [join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is only known once build_src has run,
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during the npymath build.
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
# Multiarray version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such is not in the src
# argument of the build_ext command.
def generate_multiarray_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'multiarray')
sources = [join(local_dir, subpath, 'scalartypes.c.src'),
join(local_dir, subpath, 'arraytypes.c.src'),
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src'),
join(local_dir, 'src', 'private', 'templ_common.h.src')]
# numpy.distutils generates .c from .c.src in weird directories; we have
# to add them there, as they depend on the build_dir.
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'numpymemoryview.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
# add library sources as distuils does not consider libraries
# dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpymemoryview.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
join('src', 'multiarray', 'vdot.c'),
join('src', 'private', 'templ_common.h.src'),
]
blas_info = get_info('blas_opt', 0)
if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
extra_info = blas_info
multiarray_src.append(join('src', 'multiarray', 'cblasfuncs.c'))
else:
extra_info = {}
if not ENABLE_SEPARATE_COMPILATION:
multiarray_deps.extend(multiarray_src)
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
config.add_extension('multiarray',
sources=multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends=deps + multiarray_deps,
libraries=['npymath', 'npysort'],
extra_info=extra_info)
#######################################################################
# umath module #
#######################################################################
# umath version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such is not in the src
# argument of the build_ext command.
def generate_umath_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'umath')
sources = [
join(local_dir, subpath, 'loops.h.src'),
join(local_dir, subpath, 'loops.c.src'),
join(local_dir, subpath, 'scalarmath.c.src'),
join(local_dir, subpath, 'simd.inc.src')]
# numpy.distutils generates .c from .c.src in weird directories; we have
# to add them there, as they depend on the build_dir.
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
join('src', 'multiarray', 'common.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'ufunc_override.h')] + npymath_sources
if not ENABLE_SEPARATE_COMPILATION:
umath_deps.extend(umath_src)
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources = umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends = deps + umath_deps,
libraries = ['npymath'],
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources = [join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources = [join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__=='__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import sys
import webbrowser
from profile_chrome import chrome_controller
from profile_chrome import perf_controller
from profile_chrome import profiler
from profile_chrome import systrace_controller
from profile_chrome import ui
from pylib import android_commands
from pylib.device import device_utils
_DEFAULT_CHROME_CATEGORIES = '_DEFAULT_CHROME_CATEGORIES'
def _ComputeChromeCategories(options):
categories = []
if options.trace_frame_viewer:
categories.append('disabled-by-default-cc.debug')
if options.trace_ubercompositor:
categories.append('disabled-by-default-cc.debug*')
if options.trace_gpu:
categories.append('disabled-by-default-gpu.debug*')
if options.trace_flow:
categories.append('disabled-by-default-toplevel.flow')
if options.trace_memory:
categories.append('disabled-by-default-memory')
if options.chrome_categories:
categories += options.chrome_categories.split(',')
return categories
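# A tiny illustration (not used by the tool): given an options-like object
# with the same attribute names, _ComputeChromeCategories() simply combines
# the categories implied by each flag with any explicitly requested ones.
# _FakeOptions is hypothetical and exists only for this example.
class _FakeOptions(object):
  trace_frame_viewer = True
  trace_ubercompositor = False
  trace_gpu = False
  trace_flow = True
  trace_memory = False
  chrome_categories = 'cat1,cat2'

# _ComputeChromeCategories(_FakeOptions()) returns
# ['disabled-by-default-cc.debug', 'disabled-by-default-toplevel.flow',
#  'cat1', 'cat2']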
def _ComputeSystraceCategories(options):
if not options.systrace_categories:
return []
return options.systrace_categories.split(',')
def _ComputePerfCategories(options):
if not perf_controller.PerfProfilerController.IsSupported():
return []
if not options.perf_categories:
return []
return options.perf_categories.split(',')
def _OptionalValueCallback(default_value):
def callback(option, _, __, parser):
value = default_value
if parser.rargs and not parser.rargs[0].startswith('-'):
value = parser.rargs.pop(0)
setattr(parser.values, option.dest, value)
return callback
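# Illustrative sketch (not called by the tool): how _OptionalValueCallback()
# behaves when wired to an optparse option, as done for --perf below. With no
# following argument the supplied default is used; an argument that does not
# start with '-' is consumed as the value. The option name --demo is
# hypothetical.
def _demo_optional_value_callback():
  demo_parser = optparse.OptionParser()
  demo_parser.add_option('--demo', action='callback',
                         callback=_OptionalValueCallback('cycles'),
                         dest='demo', default='')
  assert demo_parser.parse_args(['--demo'])[0].demo == 'cycles'
  assert demo_parser.parse_args(['--demo', 'hits'])[0].demo == 'hits'
  assert demo_parser.parse_args([])[0].demo == ''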
def _CreateOptionParser():
parser = optparse.OptionParser(description='Record about://tracing profiles '
'from Android browsers. See http://dev.'
'chromium.org/developers/how-tos/trace-event-'
'profiling-tool for detailed instructions for '
'profiling.')
timed_options = optparse.OptionGroup(parser, 'Timed tracing')
timed_options.add_option('-t', '--time', help='Profile for N seconds and '
'download the resulting trace.', metavar='N',
type='float')
parser.add_option_group(timed_options)
cont_options = optparse.OptionGroup(parser, 'Continuous tracing')
cont_options.add_option('--continuous', help='Profile continuously until '
'stopped.', action='store_true')
cont_options.add_option('--ring-buffer', help='Use the trace buffer as a '
'ring buffer and save its contents when stopping '
'instead of appending events into one long trace.',
action='store_true')
parser.add_option_group(cont_options)
chrome_opts = optparse.OptionGroup(parser, 'Chrome tracing options')
chrome_opts.add_option('-c', '--categories', help='Select Chrome tracing '
'categories with comma-delimited wildcards, '
'e.g., "*", "cat1*,-cat1a". Omit this option to trace '
'Chrome\'s default categories. Chrome tracing can be '
'disabled with "--categories=\'\'". Use "list" to '
'see the available categories.',
metavar='CHROME_CATEGORIES', dest='chrome_categories',
default=_DEFAULT_CHROME_CATEGORIES)
chrome_opts.add_option('--trace-cc',
help='Deprecated, use --trace-frame-viewer.',
action='store_true')
chrome_opts.add_option('--trace-frame-viewer',
help='Enable enough trace categories for '
'compositor frame viewing.', action='store_true')
chrome_opts.add_option('--trace-ubercompositor',
help='Enable enough trace categories for '
'ubercompositor frame data.', action='store_true')
chrome_opts.add_option('--trace-gpu', help='Enable extra trace categories '
'for GPU data.', action='store_true')
chrome_opts.add_option('--trace-flow', help='Enable extra trace categories '
'for IPC message flows.', action='store_true')
chrome_opts.add_option('--trace-memory', help='Enable extra trace categories '
'for memory profile. (tcmalloc required)',
action='store_true')
parser.add_option_group(chrome_opts)
systrace_opts = optparse.OptionGroup(parser, 'Systrace tracing options')
systrace_opts.add_option('-s', '--systrace', help='Capture a systrace with '
'the chosen comma-delimited systrace categories. You '
'can also capture a combined Chrome + systrace by '
'enabling both types of categories. Use "list" to see '
'the available categories. Systrace is disabled by '
'default.', metavar='SYS_CATEGORIES',
dest='systrace_categories', default='')
parser.add_option_group(systrace_opts)
if perf_controller.PerfProfilerController.IsSupported():
perf_opts = optparse.OptionGroup(parser, 'Perf profiling options')
perf_opts.add_option('-p', '--perf', help='Capture a perf profile with '
'the chosen comma-delimited event categories. '
'Samples CPU cycles by default. Use "list" to see '
'the available sample types.', action='callback',
default='', callback=_OptionalValueCallback('cycles'),
metavar='PERF_CATEGORIES', dest='perf_categories')
parser.add_option_group(perf_opts)
output_options = optparse.OptionGroup(parser, 'Output options')
output_options.add_option('-o', '--output', help='Save trace output to file.')
output_options.add_option('--json', help='Save trace as raw JSON instead of '
'HTML.', action='store_true')
output_options.add_option('--view', help='Open resulting trace file in a '
'browser.', action='store_true')
parser.add_option_group(output_options)
browsers = sorted(profiler.GetSupportedBrowsers().keys())
parser.add_option('-b', '--browser', help='Select among installed browsers. '
'One of ' + ', '.join(browsers) + ', "stable" is used by '
'default.', type='choice', choices=browsers,
default='stable')
parser.add_option('-v', '--verbose', help='Verbose logging.',
action='store_true')
parser.add_option('-z', '--compress', help='Compress the resulting trace '
'with gzip. ', action='store_true')
return parser
def main():
parser = _CreateOptionParser()
options, _args = parser.parse_args()
if options.trace_cc:
parser.error("""--trace-cc is deprecated.
For basic jank busting uses, use --trace-frame-viewer
For detailed study of ubercompositor, pass --trace-ubercompositor.
When in doubt, just try out --trace-frame-viewer.
""")
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
devices = android_commands.GetAttachedDevices()
if len(devices) != 1:
parser.error('Exactly 1 device must be attached.')
device = device_utils.DeviceUtils(devices[0])
package_info = profiler.GetSupportedBrowsers()[options.browser]
if options.chrome_categories in ['list', 'help']:
ui.PrintMessage('Collecting record categories list...', eol='')
record_categories = []
disabled_by_default_categories = []
record_categories, disabled_by_default_categories = \
chrome_controller.ChromeTracingController.GetCategories(
device, package_info)
ui.PrintMessage('done')
ui.PrintMessage('Record Categories:')
ui.PrintMessage('\n'.join('\t%s' % item \
for item in sorted(record_categories)))
ui.PrintMessage('\nDisabled by Default Categories:')
ui.PrintMessage('\n'.join('\t%s' % item \
for item in sorted(disabled_by_default_categories)))
return 0
if options.systrace_categories in ['list', 'help']:
ui.PrintMessage('\n'.join(
systrace_controller.SystraceController.GetCategories(device)))
return 0
if (perf_controller.PerfProfilerController.IsSupported() and
options.perf_categories in ['list', 'help']):
ui.PrintMessage('\n'.join(
perf_controller.PerfProfilerController.GetCategories(device)))
return 0
if not options.time and not options.continuous:
ui.PrintMessage('Time interval or continuous tracing should be specified.')
return 1
chrome_categories = _ComputeChromeCategories(options)
systrace_categories = _ComputeSystraceCategories(options)
perf_categories = _ComputePerfCategories(options)
if chrome_categories and 'webview' in systrace_categories:
logging.warning('Using the "webview" category in systrace together with '
'Chrome tracing results in duplicate trace events.')
enabled_controllers = []
if chrome_categories:
enabled_controllers.append(
chrome_controller.ChromeTracingController(device,
package_info,
chrome_categories,
options.ring_buffer,
options.trace_memory))
if systrace_categories:
enabled_controllers.append(
systrace_controller.SystraceController(device,
systrace_categories,
options.ring_buffer))
if perf_categories:
enabled_controllers.append(
perf_controller.PerfProfilerController(device,
perf_categories))
if not enabled_controllers:
ui.PrintMessage('No trace categories enabled.')
return 1
if options.output:
options.output = os.path.expanduser(options.output)
result = profiler.CaptureProfile(
enabled_controllers,
options.time if not options.continuous else 0,
output=options.output,
compress=options.compress,
write_json=options.json)
if options.view:
if sys.platform == 'darwin':
os.system('/usr/bin/open %s' % os.path.abspath(result))
else:
webbrowser.open(result)
|
|
from __future__ import with_statement
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.orm import attributes
from sqlalchemy.orm import create_session
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import session as _session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.util import identity_key
from sqlalchemy.sql import elements
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_warnings
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.util import gc_collect
from test.orm._fixtures import FixtureTest
class SessionTransactionTest(fixtures.RemovesEvents, FixtureTest):
run_inserts = None
__backend__ = True
def test_no_close_transaction_on_flush(self):
User, users = self.classes.User, self.tables.users
c = testing.db.connect()
try:
mapper(User, users)
s = create_session(bind=c)
s.begin()
tran = s.transaction
s.add(User(name="first"))
s.flush()
c.exec_driver_sql("select * from users")
u = User(name="two")
s.add(u)
s.flush()
u = User(name="third")
s.add(u)
s.flush()
assert s.transaction is tran
tran.close()
finally:
c.close()
@engines.close_open_connections
def test_subtransaction_on_external(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
conn = testing.db.connect()
trans = conn.begin()
sess = create_session(bind=conn, autocommit=False, autoflush=True)
sess.begin(subtransactions=True)
u = User(name="ed")
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
trans.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
@engines.close_open_connections
def test_external_nested_transaction(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
try:
conn = testing.db.connect()
trans = conn.begin()
sess = create_session(bind=conn, autocommit=False, autoflush=True)
u1 = User(name="u1")
sess.add(u1)
sess.flush()
sess.begin_nested()
u2 = User(name="u2")
sess.add(u2)
sess.flush()
sess.rollback()
trans.commit()
assert len(sess.query(User).all()) == 1
except Exception:
conn.close()
raise
@testing.requires.savepoints
def test_nested_accounting_new_items_removed(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(bind=testing.db)
session.begin()
session.begin_nested()
u1 = User(name="u1")
session.add(u1)
session.commit()
assert u1 in session
session.rollback()
assert u1 not in session
@testing.requires.savepoints
def test_nested_accounting_deleted_items_restored(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(bind=testing.db)
session.begin()
u1 = User(name="u1")
session.add(u1)
session.commit()
session.begin()
u1 = session.query(User).first()
session.begin_nested()
session.delete(u1)
session.commit()
assert u1 not in session
session.rollback()
assert u1 in session
@testing.requires.savepoints
def test_heavy_nesting(self):
users = self.tables.users
session = create_session(bind=testing.db)
session.begin()
session.connection().execute(users.insert().values(name="user1"))
session.begin(subtransactions=True)
session.begin_nested()
session.connection().execute(users.insert().values(name="user2"))
assert (
session.connection()
.exec_driver_sql("select count(1) from users")
.scalar()
== 2
)
session.rollback()
assert (
session.connection()
.exec_driver_sql("select count(1) from users")
.scalar()
== 1
)
session.connection().execute(users.insert().values(name="user3"))
session.commit()
assert (
session.connection()
.exec_driver_sql("select count(1) from users")
.scalar()
== 2
)
@testing.requires.savepoints
def test_dirty_state_transferred_deep_nesting(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = Session(testing.db)
u1 = User(name="u1")
s.add(u1)
s.commit()
nt1 = s.begin_nested()
nt2 = s.begin_nested()
u1.name = "u2"
assert attributes.instance_state(u1) not in nt2._dirty
assert attributes.instance_state(u1) not in nt1._dirty
s.flush()
assert attributes.instance_state(u1) in nt2._dirty
assert attributes.instance_state(u1) not in nt1._dirty
s.commit()
assert attributes.instance_state(u1) in nt2._dirty
assert attributes.instance_state(u1) in nt1._dirty
s.rollback()
assert attributes.instance_state(u1).expired
eq_(u1.name, "u1")
@testing.requires.independent_connections
def test_transactions_isolated(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s1 = create_session(bind=testing.db, autocommit=False)
s2 = create_session(bind=testing.db, autocommit=False)
u1 = User(name="u1")
s1.add(u1)
s1.flush()
assert s2.query(User).all() == []
@testing.requires.two_phase_transactions
def test_twophase(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
# TODO: mock up a failure condition here
# to ensure a rollback succeeds
mapper(User, users)
mapper(Address, addresses)
engine2 = engines.testing_engine()
sess = create_session(autocommit=True, autoflush=False, twophase=True)
sess.bind_mapper(User, testing.db)
sess.bind_mapper(Address, engine2)
sess.begin()
u1 = User(name="u1")
a1 = Address(email_address="u1@e")
sess.add_all((u1, a1))
sess.commit()
sess.close()
engine2.dispose()
eq_(select([func.count("*")]).select_from(users).scalar(), 1)
eq_(select([func.count("*")]).select_from(addresses).scalar(), 1)
@testing.requires.independent_connections
def test_invalidate(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = Session()
u = User(name="u1")
sess.add(u)
sess.flush()
c1 = sess.connection(User)
sess.invalidate()
assert c1.invalidated
eq_(sess.query(User).all(), [])
c2 = sess.connection(User)
assert not c2.invalidated
def test_subtransaction_on_noautocommit(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session(autocommit=False, autoflush=True)
sess.begin(subtransactions=True)
u = User(name="u1")
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
sess.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
def test_nested_transaction(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
sess.begin()
u = User(name="u1")
sess.add(u)
sess.flush()
sess.begin_nested() # nested transaction
u2 = User(name="u2")
sess.add(u2)
sess.flush()
sess.rollback()
sess.commit()
assert len(sess.query(User).all()) == 1
sess.close()
@testing.requires.savepoints
def test_nested_autotrans(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session(autocommit=False)
u = User(name="u1")
sess.add(u)
sess.flush()
sess.begin_nested() # nested transaction
u2 = User(name="u2")
sess.add(u2)
sess.flush()
sess.rollback()
sess.commit()
assert len(sess.query(User).all()) == 1
sess.close()
@testing.requires.savepoints
def test_nested_transaction_connection_add(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin_nested()
u1 = User(name="u1")
sess.add(u1)
sess.flush()
sess.rollback()
u2 = User(name="u2")
sess.add(u2)
sess.commit()
eq_(set(sess.query(User).all()), set([u2]))
sess.begin()
sess.begin_nested()
u3 = User(name="u3")
sess.add(u3)
sess.commit() # commit the nested transaction
sess.rollback()
eq_(set(sess.query(User).all()), set([u2]))
sess.close()
@testing.requires.savepoints
def test_mixed_transaction_control(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin_nested()
transaction = sess.begin(subtransactions=True)
sess.add(User(name="u1"))
transaction.commit()
sess.commit()
sess.commit()
sess.close()
eq_(len(sess.query(User).all()), 1)
t1 = sess.begin()
t2 = sess.begin_nested()
sess.add(User(name="u2"))
t2.commit()
assert sess.transaction is t1
sess.close()
@testing.requires.savepoints
def test_mixed_transaction_close(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=False)
sess.begin_nested()
sess.add(User(name="u1"))
sess.flush()
sess.close()
sess.add(User(name="u2"))
sess.commit()
sess.close()
eq_(len(sess.query(User).all()), 1)
def test_begin_fails_connection_is_closed(self):
eng = engines.testing_engine()
state = []
@event.listens_for(eng, "begin")
def do_begin(conn):
state.append((conn, conn.connection))
raise Exception("failure")
s1 = Session(eng)
assert_raises_message(Exception, "failure", s1.execute, "select 1")
conn, fairy = state[0]
assert not fairy.is_valid
assert conn.closed
assert not conn.invalidated
s1.close()
# close does not occur because references were not saved, however
# the underlying DBAPI connection was closed
assert not fairy.is_valid
assert conn.closed
assert not conn.invalidated
def test_begin_savepoint_fails_connection_is_not_closed(self):
eng = engines.testing_engine()
state = []
@event.listens_for(eng, "savepoint")
def do_begin(conn, name):
state.append((conn, conn.connection))
raise Exception("failure")
s1 = Session(eng)
s1.begin_nested()
assert_raises_message(Exception, "failure", s1.execute, "select 1")
conn, fairy = state[0]
assert fairy.is_valid
assert not conn.closed
assert not conn.invalidated
s1.close()
assert conn.closed
assert not fairy.is_valid
def test_continue_flushing_on_commit(self):
"""test that post-flush actions get flushed also if
we're in commit()"""
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
to_flush = [User(name="ed"), User(name="jack"), User(name="wendy")]
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
if to_flush:
session.add(to_flush.pop(0))
x = [1]
@event.listens_for(sess, "after_commit") # noqa
def add_another_user(session):
x[0] += 1
sess.add(to_flush.pop())
sess.commit()
eq_(x, [2])
eq_(sess.scalar(select([func.count(users.c.id)])), 3)
def test_continue_flushing_guard(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
session.add(User(name="x"))
sess.add(User(name="x"))
assert_raises_message(
orm_exc.FlushError,
"Over 100 subsequent flushes have occurred",
sess.commit,
)
def test_error_on_using_inactive_session_commands(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin(subtransactions=True)
sess.add(User(name="u1"))
sess.flush()
sess.rollback()
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'inactive' state, due to the SQL transaction "
"being rolled back; no further SQL can be emitted within this "
"transaction.",
sess.begin,
subtransactions=True,
)
sess.close()
def test_no_sql_during_commit(self):
sess = create_session(bind=testing.db, autocommit=False)
@event.listens_for(sess, "after_commit")
def go(session):
session.execute("select 1")
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction.",
sess.commit,
)
def test_no_sql_during_prepare(self):
sess = create_session(bind=testing.db, autocommit=False, twophase=True)
sess.prepare()
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction.",
sess.execute,
"select 1",
)
def test_no_sql_during_rollback(self):
sess = create_session(bind=testing.db, autocommit=False)
sess.connection()
@event.listens_for(sess, "after_rollback")
def go(session):
session.execute("select 1")
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'inactive' state, due to the SQL transaction "
"being rolled back; no further SQL can be emitted within this "
"transaction.",
sess.rollback,
)
@testing.emits_warning(".*previous exception")
def test_failed_rollback_deactivates_transaction(self):
# test #4050
users, User = self.tables.users, self.classes.User
mapper(User, users)
session = Session(bind=testing.db)
rollback_error = testing.db.dialect.dbapi.InterfaceError(
"Can't roll back to savepoint"
)
def prevent_savepoint_rollback(
cursor, statement, parameters, context=None
):
if (
context is not None
and context.compiled
and isinstance(
context.compiled.statement,
elements.RollbackToSavepointClause,
)
):
raise rollback_error
self.event_listen(
testing.db.dialect, "do_execute", prevent_savepoint_rollback
)
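        # from here on, any ROLLBACK TO SAVEPOINT emitted by the Session
        # raises the simulated InterfaceError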
with session.transaction:
session.add(User(id=1, name="x"))
session.begin_nested()
# raises IntegrityError on flush
session.add(User(id=1, name="x"))
assert_raises_message(
sa_exc.InterfaceError,
"Can't roll back to savepoint",
session.commit,
)
# rollback succeeds, because the Session is deactivated
eq_(session.transaction._state, _session.DEACTIVE)
session.rollback()
# back to normal
eq_(session.transaction._state, _session.ACTIVE)
trans = session.transaction
# leave the outermost trans
session.rollback()
# trans is now closed
eq_(trans._state, _session.CLOSED)
# outermost transaction is new
is_not_(session.transaction, trans)
# outermost is active
eq_(session.transaction._state, _session.ACTIVE)
@testing.emits_warning(".*previous exception")
def test_failed_rollback_deactivates_transaction_ctx_integration(self):
# test #4050 in the same context as that of oslo.db
users, User = self.tables.users, self.classes.User
mapper(User, users)
session = Session(bind=testing.db, autocommit=True)
evented_exceptions = []
caught_exceptions = []
def canary(context):
evented_exceptions.append(context.original_exception)
rollback_error = testing.db.dialect.dbapi.InterfaceError(
"Can't roll back to savepoint"
)
def prevent_savepoint_rollback(
cursor, statement, parameters, context=None
):
if (
context is not None
and context.compiled
and isinstance(
context.compiled.statement,
elements.RollbackToSavepointClause,
)
):
raise rollback_error
self.event_listen(testing.db, "handle_error", canary, retval=True)
self.event_listen(
testing.db.dialect, "do_execute", prevent_savepoint_rollback
)
with session.begin():
session.add(User(id=1, name="x"))
try:
with session.begin():
try:
with session.begin_nested():
# raises IntegrityError on flush
session.add(User(id=1, name="x"))
# outermost is the failed SAVEPOINT rollback
# from the "with session.begin_nested()"
except sa_exc.DBAPIError as dbe_inner:
caught_exceptions.append(dbe_inner.orig)
raise
except sa_exc.DBAPIError as dbe_outer:
caught_exceptions.append(dbe_outer.orig)
is_true(
isinstance(
evented_exceptions[0], testing.db.dialect.dbapi.IntegrityError
)
)
eq_(evented_exceptions[1], rollback_error)
eq_(len(evented_exceptions), 2)
eq_(caught_exceptions, [rollback_error, rollback_error])
def test_no_prepare_wo_twophase(self):
sess = create_session(bind=testing.db, autocommit=False)
assert_raises_message(
sa_exc.InvalidRequestError,
"'twophase' mode not enabled, or not root "
"transaction; can't prepare.",
sess.prepare,
)
def test_closed_status_check(self):
sess = create_session()
trans = sess.begin()
trans.rollback()
assert_raises_message(
sa_exc.ResourceClosedError,
"This transaction is closed",
trans.rollback,
)
assert_raises_message(
sa_exc.ResourceClosedError,
"This transaction is closed",
trans.commit,
)
def test_deactive_status_check(self):
sess = create_session()
trans = sess.begin()
trans2 = sess.begin(subtransactions=True)
trans2.rollback()
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'inactive' state, due to the SQL transaction "
"being rolled back; no further SQL can be emitted within this "
"transaction.",
trans.commit,
)
def test_deactive_status_check_w_exception(self):
sess = create_session()
trans = sess.begin()
trans2 = sess.begin(subtransactions=True)
try:
raise Exception("test")
except Exception:
trans2.rollback(_capture_exception=True)
assert_raises_message(
sa_exc.InvalidRequestError,
r"This Session's transaction has been rolled back due to a "
r"previous exception during flush. To begin a new transaction "
r"with this Session, first issue Session.rollback\(\). "
r"Original exception was: test",
trans.commit,
)
def _inactive_flushed_session_fixture(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
u1 = User(id=1, name="u1")
sess.add(u1)
sess.commit()
sess.add(User(id=1, name="u2"))
with expect_warnings("New instance"):
assert_raises(sa_exc.IntegrityError, sess.flush)
return sess, u1
def test_execution_options_begin_transaction(self):
bind = mock.Mock()
sess = Session(bind=bind)
c1 = sess.connection(execution_options={"isolation_level": "FOO"})
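        # the Session should connect, apply the execution options, and only
        # then BEGIN the transaction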
eq_(
bind.mock_calls,
[
mock.call.connect(),
mock.call.connect().execution_options(isolation_level="FOO"),
mock.call.connect().execution_options().begin(),
],
)
eq_(c1, bind.connect().execution_options())
def test_execution_options_ignored_mid_transaction(self):
bind = mock.Mock()
conn = mock.Mock(engine=bind)
bind.connect = mock.Mock(return_value=conn)
sess = Session(bind=bind)
sess.execute("select 1")
with expect_warnings(
"Connection is already established for the "
"given bind; execution_options ignored"
):
sess.connection(execution_options={"isolation_level": "FOO"})
def test_warning_on_using_inactive_session_new(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
u2 = User(name="u2")
sess.add(u2)
def go():
sess.rollback()
assert_warnings(
go,
[
"Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."
],
)
assert u2 not in sess
assert u1 in sess
def test_warning_on_using_inactive_session_dirty(self):
sess, u1 = self._inactive_flushed_session_fixture()
u1.name = "newname"
def go():
sess.rollback()
assert_warnings(
go,
[
"Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."
],
)
assert u1 in sess
assert u1 not in sess.dirty
def test_warning_on_using_inactive_session_delete(self):
sess, u1 = self._inactive_flushed_session_fixture()
sess.delete(u1)
def go():
sess.rollback()
assert_warnings(
go,
[
"Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."
],
)
assert u1 in sess
assert u1 not in sess.deleted
def test_warning_on_using_inactive_session_rollback_evt(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
u1 = User(id=1, name="u1")
sess.add(u1)
sess.commit()
u3 = User(name="u3")
@event.listens_for(sess, "after_rollback")
def evt(s):
sess.add(u3)
sess.add(User(id=1, name="u2"))
def go():
assert_raises(orm_exc.FlushError, sess.flush)
assert u3 not in sess
def test_preserve_flush_error(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
for i in range(5):
assert_raises_message(
sa_exc.InvalidRequestError,
"^This Session's transaction has been "
r"rolled back due to a previous exception "
"during flush. To "
"begin a new transaction with this "
"Session, first issue "
r"Session.rollback\(\). Original exception "
"was:",
sess.commit,
)
sess.rollback()
sess.add(User(id=5, name="some name"))
sess.commit()
def test_no_autocommit_with_explicit_commit(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(autocommit=False)
session.add(User(name="ed"))
session.transaction.commit()
assert (
session.transaction is not None
), "autocommit=False should start a new transaction"
@testing.requires.python2
@testing.requires.savepoints_w_release
def test_report_primary_error_when_rollback_fails(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = Session(testing.db)
with expect_warnings(".*during handling of a previous exception.*"):
session.begin_nested()
savepoint = (
session.connection()._Connection__transaction._savepoint
)
# force the savepoint to disappear
session.connection().dialect.do_release_savepoint(
session.connection(), savepoint
)
# now do a broken flush
session.add_all([User(id=1), User(id=1)])
assert_raises_message(
sa_exc.DBAPIError, "ROLLBACK TO SAVEPOINT ", session.flush
)
class _LocalFixture(FixtureTest):
run_setup_mappers = "once"
run_inserts = None
session = sessionmaker()
@classmethod
def setup_mappers(cls):
User, Address = cls.classes.User, cls.classes.Address
users, addresses = cls.tables.users, cls.tables.addresses
mapper(
User,
users,
properties={
"addresses": relationship(
Address,
backref="user",
cascade="all, delete-orphan",
order_by=addresses.c.id,
)
},
)
mapper(Address, addresses)
class FixtureDataTest(_LocalFixture):
run_inserts = "each"
__backend__ = True
def test_attrs_on_rollback(self):
User = self.classes.User
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = "ed"
sess.rollback()
eq_(u1.name, "jack")
def test_commit_persistent(self):
User = self.classes.User
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = "ed"
sess.flush()
sess.commit()
eq_(u1.name, "ed")
def test_concurrent_commit_persistent(self):
User = self.classes.User
s1 = self.session()
u1 = s1.query(User).get(7)
u1.name = "ed"
s1.commit()
s2 = self.session()
u2 = s2.query(User).get(7)
assert u2.name == "ed"
u2.name = "will"
s2.commit()
assert u1.name == "will"
class CleanSavepointTest(FixtureTest):
"""test the behavior for [ticket:2452] - rollback on begin_nested()
only expires objects tracked as being modified in that transaction.
"""
run_inserts = None
__backend__ = True
def _run_test(self, update_fn):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = Session(bind=testing.db)
u1 = User(name="u1")
u2 = User(name="u2")
s.add_all([u1, u2])
s.commit()
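        # touch the attributes so they are loaded back into __dict__ after
        # the commit-time expiration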
u1.name
u2.name
s.begin_nested()
update_fn(s, u2)
eq_(u2.name, "u2modified")
s.rollback()
eq_(u1.__dict__["name"], "u1")
assert "name" not in u2.__dict__
eq_(u2.name, "u2")
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint(self):
def update_fn(s, u2):
u2.name = "u2modified"
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_eval(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name="u2").update(
dict(name="u2modified"), synchronize_session="evaluate"
)
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_fetch(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name="u2").update(
dict(name="u2modified"), synchronize_session="fetch"
)
self._run_test(update_fn)
class ContextManagerTest(FixtureTest):
run_inserts = None
__backend__ = True
@testing.requires.savepoints
@engines.close_open_connections
def test_contextmanager_nested_rollback(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
def go():
with sess.begin_nested():
sess.add(User()) # name can't be null
sess.flush()
# and not InvalidRequestError
assert_raises(sa_exc.DBAPIError, go)
with sess.begin_nested():
sess.add(User(name="u1"))
eq_(sess.query(User).count(), 1)
def test_contextmanager_commit(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session(autocommit=True)
with sess.begin():
sess.add(User(name="u1"))
sess.rollback()
eq_(sess.query(User).count(), 1)
def test_contextmanager_rollback(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session(autocommit=True)
def go():
with sess.begin():
sess.add(User()) # name can't be null
assert_raises(sa_exc.DBAPIError, go)
eq_(sess.query(User).count(), 0)
with sess.begin():
sess.add(User(name="u1"))
eq_(sess.query(User).count(), 1)
class AutoExpireTest(_LocalFixture):
__backend__ = True
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = self.session()
u2 = User(name="newuser")
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
def test_trans_pending_cleared_on_commit(self):
User = self.classes.User
sess = self.session()
u2 = User(name="newuser")
sess.add(u2)
assert u2 in sess
sess.commit()
assert u2 in sess
u3 = User(name="anotheruser")
sess.add(u3)
sess.rollback()
assert u3 not in sess
assert u2 in sess
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name="ed")
s.add(u1)
s.commit()
# this actually tests that the delete() operation,
# when cascaded to the "addresses" collection, does not
# trigger a flush (via lazyload) before the cascade is complete.
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
@testing.requires.predictable_gc
def test_gced_delete_on_rollback(self):
User, users = self.classes.User, self.tables.users
s = self.session()
u1 = User(name="ed")
s.add(u1)
s.commit()
s.delete(u1)
u1_state = attributes.instance_state(u1)
assert u1_state in s.identity_map.all_states()
assert u1_state in s._deleted
s.flush()
assert u1_state not in s.identity_map.all_states()
assert u1_state not in s._deleted
del u1
gc_collect()
assert u1_state.obj() is None
s.rollback()
# new in 1.1, not in identity map if the object was
# gc'ed and we restore snapshot; we've changed update_impl
# to just skip this object
assert u1_state not in s.identity_map.all_states()
# in any version, the state is replaced by the query
# because the identity map would switch it
u1 = s.query(User).filter_by(name="ed").one()
assert u1_state not in s.identity_map.all_states()
eq_(s.scalar(select([func.count("*")]).select_from(users)), 1)
s.delete(u1)
s.flush()
eq_(s.scalar(select([func.count("*")]).select_from(users)), 0)
s.commit()
def test_trans_deleted_cleared_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name="ed")
s.add(u1)
s.commit()
s.delete(u1)
s.commit()
assert u1 not in s
s.rollback()
assert u1 not in s
def test_update_deleted_on_rollback_cascade(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
s.delete(u1)
assert u1 in s.deleted
assert u1.addresses[0] in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
assert u1.addresses[0] not in s.deleted
def test_update_deleted_on_rollback_orphan(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
a1 = u1.addresses[0]
u1.addresses.remove(a1)
s.flush()
eq_(s.query(Address).filter(Address.email_address == "foo").all(), [])
s.rollback()
assert a1 not in s.deleted
assert u1.addresses == [a1]
def test_commit_pending(self):
User = self.classes.User
sess = self.session()
u1 = User(name="newuser")
sess.add(u1)
sess.flush()
sess.commit()
eq_(u1.name, "newuser")
def test_concurrent_commit_pending(self):
User = self.classes.User
s1 = self.session()
u1 = User(name="edward")
s1.add(u1)
s1.commit()
s2 = self.session()
u2 = s2.query(User).filter(User.name == "edward").one()
u2.name = "will"
s2.commit()
assert u1.name == "will"
class TwoPhaseTest(_LocalFixture):
__backend__ = True
@testing.requires.two_phase_transactions
def test_rollback_on_prepare(self):
User = self.classes.User
s = self.session(twophase=True)
u = User(name="ed")
s.add(u)
s.prepare()
s.rollback()
assert u not in s
class RollbackRecoverTest(_LocalFixture):
__backend__ = True
def test_pk_violation(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
a1 = Address(email_address="foo")
u1 = User(id=1, name="ed", addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address="bar")
u2 = User(id=1, name="jack", addresses=[a2])
u1.name = "edward"
a1.email_address = "foober"
s.add(u2)
with expect_warnings("New instance"):
assert_raises(sa_exc.IntegrityError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
assert u1.name == "ed"
assert a1.email_address == "foo"
u1.name = "edward"
a1.email_address = "foober"
s.commit()
eq_(
s.query(User).all(),
[
User(
id=1,
name="edward",
addresses=[Address(email_address="foober")],
)
],
)
@testing.requires.savepoints
def test_pk_violation_with_savepoint(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
a1 = Address(email_address="foo")
u1 = User(id=1, name="ed", addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address="bar")
u2 = User(id=1, name="jack", addresses=[a2])
u1.name = "edward"
a1.email_address = "foober"
s.begin_nested()
s.add(u2)
with expect_warnings("New instance"):
assert_raises(sa_exc.IntegrityError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
s.commit()
eq_(
s.query(User).all(),
[
User(
id=1,
name="edward",
addresses=[Address(email_address="foober")],
)
],
)
class SavepointTest(_LocalFixture):
__backend__ = True
@testing.requires.savepoints
def test_savepoint_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name="ed")
u2 = User(name="jack")
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name="wendy")
u4 = User(name="foo")
u1.name = "edward"
u2.name = "jackward"
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
s.rollback()
assert u1.name == "ed"
assert u2.name == "jack"
eq_(s.query(User.name).order_by(User.id).all(), [("ed",), ("jack",)])
s.commit()
assert u1.name == "ed"
assert u2.name == "jack"
eq_(s.query(User.name).order_by(User.id).all(), [("ed",), ("jack",)])
@testing.requires.savepoints
def test_savepoint_delete(self):
User = self.classes.User
s = self.session()
u1 = User(name="ed")
s.add(u1)
s.commit()
eq_(s.query(User).filter_by(name="ed").count(), 1)
s.begin_nested()
s.delete(u1)
s.commit()
eq_(s.query(User).filter_by(name="ed").count(), 0)
s.commit()
@testing.requires.savepoints
def test_savepoint_commit(self):
User = self.classes.User
s = self.session()
u1 = User(name="ed")
u2 = User(name="jack")
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name="wendy")
u4 = User(name="foo")
u1.name = "edward"
u2.name = "jackward"
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
s.commit()
def go():
assert u1.name == "edward"
assert u2.name == "jackward"
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
self.assert_sql_count(testing.db, go, 1)
s.commit()
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
@testing.requires.savepoints
def test_savepoint_rollback_collections(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
u1.name = "edward"
u1.addresses.append(Address(email_address="bar"))
s.begin_nested()
u2 = User(name="jack", addresses=[Address(email_address="bat")])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
s.rollback()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
)
],
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
)
],
)
@testing.requires.savepoints
def test_savepoint_commit_collections(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
u1.name = "edward"
u1.addresses.append(Address(email_address="bar"))
s.begin_nested()
u2 = User(name="jack", addresses=[Address(email_address="bat")])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
@testing.requires.savepoints
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = self.session()
sess.begin_nested()
u2 = User(name="newuser")
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
@testing.requires.savepoints
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name="ed")
s.add(u1)
s.commit()
s.begin_nested()
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
@testing.requires.savepoints_w_release
def test_savepoint_lost_still_runs(self):
User = self.classes.User
s = self.session(bind=self.bind)
trans = s.begin_nested()
s.connection()
u1 = User(name="ed")
s.add(u1)
# kill off the transaction
nested_trans = trans._connections[self.bind][1]
nested_trans._do_commit()
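        # the SAVEPOINT has been released on the database side, so the
        # Session's ROLLBACK TO SAVEPOINT below will fail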
is_(s.transaction, trans)
assert_raises(sa_exc.DBAPIError, s.rollback)
assert u1 not in s.new
is_(trans._state, _session.CLOSED)
is_not_(s.transaction, trans)
is_(s.transaction._state, _session.ACTIVE)
is_(s.transaction.nested, False)
is_(s.transaction._parent, None)
class AccountingFlagsTest(_LocalFixture):
__backend__ = True
def test_no_expire_on_commit(self):
User, users = self.classes.User, self.tables.users
sess = sessionmaker(expire_on_commit=False)()
u1 = User(name="ed")
sess.add(u1)
sess.commit()
testing.db.execute(
users.update(users.c.name == "ed").values(name="edward")
)
assert u1.name == "ed"
sess.expire_all()
assert u1.name == "edward"
class AutoCommitTest(_LocalFixture):
__backend__ = True
def test_begin_nested_requires_trans(self):
sess = create_session(autocommit=True)
assert_raises(sa_exc.InvalidRequestError, sess.begin_nested)
def test_begin_preflush(self):
User = self.classes.User
sess = create_session(autocommit=True)
u1 = User(name="ed")
sess.add(u1)
sess.begin()
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
assert u2 not in sess
assert u1 in sess
assert sess.query(User).filter_by(name="ed").one() is u1
def test_accounting_commit_fails_add(self):
User = self.classes.User
sess = create_session(autocommit=True)
fail = False
def fail_fn(*arg, **kw):
if fail:
raise Exception("commit fails")
event.listen(sess, "after_flush_postexec", fail_fn)
u1 = User(name="ed")
sess.add(u1)
fail = True
assert_raises(Exception, sess.flush)
fail = False
assert u1 not in sess
u1new = User(id=2, name="fred")
sess.add(u1new)
sess.add(u1)
sess.flush()
assert u1 in sess
eq_(
sess.query(User.name).order_by(User.name).all(),
[("ed",), ("fred",)],
)
def test_accounting_commit_fails_delete(self):
User = self.classes.User
sess = create_session(autocommit=True)
fail = False
def fail_fn(*arg, **kw):
if fail:
raise Exception("commit fails")
event.listen(sess, "after_flush_postexec", fail_fn)
u1 = User(name="ed")
sess.add(u1)
sess.flush()
sess.delete(u1)
fail = True
assert_raises(Exception, sess.flush)
fail = False
assert u1 in sess
assert u1 not in sess.deleted
sess.delete(u1)
sess.flush()
assert u1 not in sess
eq_(sess.query(User.name).order_by(User.name).all(), [])
@testing.requires.updateable_autoincrement_pks
def test_accounting_no_select_needed(self):
"""test that flush accounting works on non-expired instances
when autocommit=True/expire_on_commit=True."""
User = self.classes.User
sess = create_session(autocommit=True, expire_on_commit=True)
u1 = User(id=1, name="ed")
sess.add(u1)
sess.flush()
u1.id = 3
u1.name = "fred"
self.assert_sql_count(testing.db, sess.flush, 1)
assert "id" not in u1.__dict__
eq_(u1.id, 3)
class NaturalPKRollbackTest(fixtures.MappedTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table("users", metadata, Column("name", String(50), primary_key=True))
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
def test_rollback_recover(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
session = sessionmaker()()
u1, u2, u3 = User(name="u1"), User(name="u2"), User(name="u3")
session.add_all([u1, u2, u3])
session.commit()
session.delete(u2)
u4 = User(name="u2")
session.add(u4)
session.flush()
u5 = User(name="u3")
session.add(u5)
with expect_warnings("New instance"):
assert_raises(sa_exc.IntegrityError, session.flush)
assert u5 not in session
assert u2 not in session.deleted
session.rollback()
def test_reloaded_deleted_checked_for_expiry(self):
"""test issue #3677"""
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name="u1")
s = Session()
s.add(u1)
s.flush()
del u1
gc_collect()
u1 = s.query(User).first() # noqa
s.rollback()
u2 = User(name="u1")
s.add(u2)
s.commit()
assert inspect(u2).persistent
def test_key_replaced_by_update(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name="u1")
u2 = User(name="u2")
s = Session()
s.add_all([u1, u2])
s.commit()
s.delete(u1)
s.flush()
u2.name = "u1"
s.flush()
assert u1 not in s
s.rollback()
assert u1 in s
assert u2 in s
assert s.identity_map[identity_key(User, ("u1",))] is u1
assert s.identity_map[identity_key(User, ("u2",))] is u2
@testing.requires.savepoints
def test_key_replaced_by_update_nested(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name="u1")
s = Session()
s.add(u1)
s.commit()
with s.begin_nested():
u2 = User(name="u2")
s.add(u2)
s.flush()
u2.name = "u3"
s.rollback()
assert u1 in s
assert u2 not in s
u1.name = "u5"
s.commit()
def test_multiple_key_replaced_by_update(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name="u1")
u2 = User(name="u2")
u3 = User(name="u3")
s = Session()
s.add_all([u1, u2, u3])
s.commit()
s.delete(u1)
s.delete(u2)
s.flush()
u3.name = "u1"
s.flush()
u3.name = "u2"
s.flush()
s.rollback()
assert u1 in s
assert u2 in s
assert u3 in s
assert s.identity_map[identity_key(User, ("u1",))] is u1
assert s.identity_map[identity_key(User, ("u2",))] is u2
assert s.identity_map[identity_key(User, ("u3",))] is u3
def test_key_replaced_by_oob_insert(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name="u1")
s = Session()
s.add(u1)
s.commit()
s.delete(u1)
s.flush()
s.execute(users.insert().values(name="u1"))
u2 = s.query(User).get("u1")
assert u1 not in s
s.rollback()
assert u1 in s
assert u2 not in s
assert s.identity_map[identity_key(User, ("u1",))] is u1
|
|
"""Tests for the Home Assistant Websocket API."""
import asyncio
from unittest.mock import patch, Mock
from aiohttp import WSMsgType
from async_timeout import timeout
import pytest
from homeassistant.core import callback
from homeassistant.components import websocket_api as wapi
from homeassistant.setup import async_setup_component
from tests.common import mock_coro, async_mock_service
API_PASSWORD = 'test1234'
@pytest.fixture
def websocket_client(hass, hass_ws_client):
"""Create a websocket client."""
return hass.loop.run_until_complete(hass_ws_client(hass))
@pytest.fixture
def no_auth_websocket_client(hass, loop, aiohttp_client):
"""Websocket connection that requires authentication."""
assert loop.run_until_complete(
async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
}))
client = loop.run_until_complete(aiohttp_client(hass.http.app))
ws = loop.run_until_complete(client.ws_connect(wapi.URL))
auth_ok = loop.run_until_complete(ws.receive_json())
assert auth_ok['type'] == wapi.TYPE_AUTH_REQUIRED
yield ws
if not ws.closed:
loop.run_until_complete(ws.close())
@pytest.fixture
def mock_low_queue():
"""Mock a low queue."""
with patch.object(wapi, 'MAX_PENDING_MSG', 5):
yield
@asyncio.coroutine
def test_auth_via_msg(no_auth_websocket_client):
"""Test authenticating."""
yield from no_auth_websocket_client.send_json({
'type': wapi.TYPE_AUTH,
'api_password': API_PASSWORD
})
msg = yield from no_auth_websocket_client.receive_json()
assert msg['type'] == wapi.TYPE_AUTH_OK
@asyncio.coroutine
def test_auth_via_msg_incorrect_pass(no_auth_websocket_client):
"""Test authenticating."""
with patch('homeassistant.components.websocket_api.process_wrong_login',
return_value=mock_coro()) as mock_process_wrong_login:
yield from no_auth_websocket_client.send_json({
'type': wapi.TYPE_AUTH,
'api_password': API_PASSWORD + 'wrong'
})
msg = yield from no_auth_websocket_client.receive_json()
assert mock_process_wrong_login.called
assert msg['type'] == wapi.TYPE_AUTH_INVALID
assert msg['message'] == 'Invalid access token or password'
@asyncio.coroutine
def test_pre_auth_only_auth_allowed(no_auth_websocket_client):
"""Verify that before authentication, only auth messages are allowed."""
yield from no_auth_websocket_client.send_json({
'type': wapi.TYPE_CALL_SERVICE,
'domain': 'domain_test',
'service': 'test_service',
'service_data': {
'hello': 'world'
}
})
msg = yield from no_auth_websocket_client.receive_json()
assert msg['type'] == wapi.TYPE_AUTH_INVALID
assert msg['message'].startswith('Message incorrectly formatted')
@asyncio.coroutine
def test_invalid_message_format(websocket_client):
"""Test sending invalid JSON."""
yield from websocket_client.send_json({'type': 5})
msg = yield from websocket_client.receive_json()
assert msg['type'] == wapi.TYPE_RESULT
error = msg['error']
assert error['code'] == wapi.ERR_INVALID_FORMAT
assert error['message'].startswith('Message incorrectly formatted')
@asyncio.coroutine
def test_invalid_json(websocket_client):
"""Test sending invalid JSON."""
yield from websocket_client.send_str('this is not JSON')
msg = yield from websocket_client.receive()
assert msg.type == WSMsgType.close
@asyncio.coroutine
def test_quitting_hass(hass, websocket_client):
    """Test that the websocket is closed when Home Assistant stops."""
with patch.object(hass.loop, 'stop'):
yield from hass.async_stop()
msg = yield from websocket_client.receive()
assert msg.type == WSMsgType.CLOSE
@asyncio.coroutine
def test_call_service(hass, websocket_client):
"""Test call service command."""
calls = []
@callback
def service_call(call):
calls.append(call)
hass.services.async_register('domain_test', 'test_service', service_call)
yield from websocket_client.send_json({
'id': 5,
'type': wapi.TYPE_CALL_SERVICE,
'domain': 'domain_test',
'service': 'test_service',
'service_data': {
'hello': 'world'
}
})
msg = yield from websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_RESULT
assert msg['success']
assert len(calls) == 1
call = calls[0]
assert call.domain == 'domain_test'
assert call.service == 'test_service'
assert call.data == {'hello': 'world'}
@asyncio.coroutine
def test_subscribe_unsubscribe_events(hass, websocket_client):
"""Test subscribe/unsubscribe events command."""
init_count = sum(hass.bus.async_listeners().values())
yield from websocket_client.send_json({
'id': 5,
'type': wapi.TYPE_SUBSCRIBE_EVENTS,
'event_type': 'test_event'
})
msg = yield from websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_RESULT
assert msg['success']
# Verify we have a new listener
assert sum(hass.bus.async_listeners().values()) == init_count + 1
hass.bus.async_fire('ignore_event')
hass.bus.async_fire('test_event', {'hello': 'world'})
hass.bus.async_fire('ignore_event')
with timeout(3, loop=hass.loop):
msg = yield from websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_EVENT
event = msg['event']
assert event['event_type'] == 'test_event'
assert event['data'] == {'hello': 'world'}
assert event['origin'] == 'LOCAL'
yield from websocket_client.send_json({
'id': 6,
'type': wapi.TYPE_UNSUBSCRIBE_EVENTS,
'subscription': 5
})
msg = yield from websocket_client.receive_json()
assert msg['id'] == 6
assert msg['type'] == wapi.TYPE_RESULT
assert msg['success']
# Check our listener got unsubscribed
assert sum(hass.bus.async_listeners().values()) == init_count
@asyncio.coroutine
def test_get_states(hass, websocket_client):
"""Test get_states command."""
hass.states.async_set('greeting.hello', 'world')
hass.states.async_set('greeting.bye', 'universe')
yield from websocket_client.send_json({
'id': 5,
'type': wapi.TYPE_GET_STATES,
})
msg = yield from websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_RESULT
assert msg['success']
states = []
for state in hass.states.async_all():
state = state.as_dict()
state['last_changed'] = state['last_changed'].isoformat()
state['last_updated'] = state['last_updated'].isoformat()
states.append(state)
assert msg['result'] == states
@asyncio.coroutine
def test_get_services(hass, websocket_client):
"""Test get_services command."""
yield from websocket_client.send_json({
'id': 5,
'type': wapi.TYPE_GET_SERVICES,
})
msg = yield from websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_RESULT
assert msg['success']
assert msg['result'] == hass.services.async_services()
@asyncio.coroutine
def test_get_config(hass, websocket_client):
"""Test get_config command."""
yield from websocket_client.send_json({
'id': 5,
'type': wapi.TYPE_GET_CONFIG,
})
msg = yield from websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_RESULT
assert msg['success']
if 'components' in msg['result']:
msg['result']['components'] = set(msg['result']['components'])
if 'whitelist_external_dirs' in msg['result']:
msg['result']['whitelist_external_dirs'] = \
set(msg['result']['whitelist_external_dirs'])
assert msg['result'] == hass.config.as_dict()
@asyncio.coroutine
def test_ping(websocket_client):
"""Test get_panels command."""
yield from websocket_client.send_json({
'id': 5,
'type': wapi.TYPE_PING,
})
msg = yield from websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_PONG
@asyncio.coroutine
def test_pending_msg_overflow(hass, mock_low_queue, websocket_client):
"""Test get_panels command."""
for idx in range(10):
yield from websocket_client.send_json({
'id': idx + 1,
'type': wapi.TYPE_PING,
})
msg = yield from websocket_client.receive()
assert msg.type == WSMsgType.close
@asyncio.coroutine
def test_unknown_command(websocket_client):
"""Test get_panels command."""
yield from websocket_client.send_json({
'id': 5,
'type': 'unknown_command',
})
msg = yield from websocket_client.receive_json()
assert not msg['success']
assert msg['error']['code'] == wapi.ERR_UNKNOWN_COMMAND
async def test_auth_active_with_token(hass, aiohttp_client, hass_access_token):
"""Test authenticating with a token."""
assert await async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
})
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(wapi.URL) as ws:
with patch('homeassistant.auth.AuthManager.active') as auth_active:
auth_active.return_value = True
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_REQUIRED
await ws.send_json({
'type': wapi.TYPE_AUTH,
'access_token': hass_access_token
})
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_OK
async def test_auth_active_user_inactive(hass, aiohttp_client,
hass_access_token):
"""Test authenticating with a token."""
refresh_token = await hass.auth.async_validate_access_token(
hass_access_token)
refresh_token.user.is_active = False
assert await async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
})
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(wapi.URL) as ws:
with patch('homeassistant.auth.AuthManager.active') as auth_active:
auth_active.return_value = True
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_REQUIRED
await ws.send_json({
'type': wapi.TYPE_AUTH,
'access_token': hass_access_token
})
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_INVALID
async def test_auth_active_with_password_not_allow(hass, aiohttp_client):
"""Test authenticating with a token."""
assert await async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
})
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(wapi.URL) as ws:
with patch('homeassistant.auth.AuthManager.active',
return_value=True):
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_REQUIRED
await ws.send_json({
'type': wapi.TYPE_AUTH,
'api_password': API_PASSWORD
})
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_INVALID
async def test_auth_legacy_support_with_password(hass, aiohttp_client):
"""Test authenticating with a token."""
assert await async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
})
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(wapi.URL) as ws:
with patch('homeassistant.auth.AuthManager.active',
return_value=True),\
patch('homeassistant.auth.AuthManager.support_legacy',
return_value=True):
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_REQUIRED
await ws.send_json({
'type': wapi.TYPE_AUTH,
'api_password': API_PASSWORD
})
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_OK
async def test_auth_with_invalid_token(hass, aiohttp_client):
"""Test authenticating with a token."""
assert await async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
})
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(wapi.URL) as ws:
with patch('homeassistant.auth.AuthManager.active') as auth_active:
auth_active.return_value = True
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_REQUIRED
await ws.send_json({
'type': wapi.TYPE_AUTH,
'access_token': 'incorrect'
})
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_INVALID
async def test_call_service_context_with_user(hass, aiohttp_client,
hass_access_token):
"""Test that the user is set in the service call context."""
assert await async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
})
calls = async_mock_service(hass, 'domain_test', 'test_service')
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(wapi.URL) as ws:
with patch('homeassistant.auth.AuthManager.active') as auth_active:
auth_active.return_value = True
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_REQUIRED
await ws.send_json({
'type': wapi.TYPE_AUTH,
'access_token': hass_access_token
})
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_OK
await ws.send_json({
'id': 5,
'type': wapi.TYPE_CALL_SERVICE,
'domain': 'domain_test',
'service': 'test_service',
'service_data': {
'hello': 'world'
}
})
msg = await ws.receive_json()
assert msg['success']
refresh_token = await hass.auth.async_validate_access_token(
hass_access_token)
assert len(calls) == 1
call = calls[0]
assert call.domain == 'domain_test'
assert call.service == 'test_service'
assert call.data == {'hello': 'world'}
assert call.context.user_id == refresh_token.user.id
async def test_call_service_context_no_user(hass, aiohttp_client):
"""Test that connection without user sets context."""
assert await async_setup_component(hass, 'websocket_api', {
'http': {
'api_password': API_PASSWORD
}
})
calls = async_mock_service(hass, 'domain_test', 'test_service')
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(wapi.URL) as ws:
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_REQUIRED
await ws.send_json({
'type': wapi.TYPE_AUTH,
'api_password': API_PASSWORD
})
auth_msg = await ws.receive_json()
assert auth_msg['type'] == wapi.TYPE_AUTH_OK
await ws.send_json({
'id': 5,
'type': wapi.TYPE_CALL_SERVICE,
'domain': 'domain_test',
'service': 'test_service',
'service_data': {
'hello': 'world'
}
})
msg = await ws.receive_json()
assert msg['success']
assert len(calls) == 1
call = calls[0]
assert call.domain == 'domain_test'
assert call.service == 'test_service'
assert call.data == {'hello': 'world'}
assert call.context.user_id is None
async def test_handler_failing(hass, websocket_client):
"""Test a command that raises."""
hass.components.websocket_api.async_register_command(
'bla', Mock(side_effect=TypeError),
wapi.BASE_COMMAND_MESSAGE_SCHEMA.extend({'type': 'bla'}))
await websocket_client.send_json({
'id': 5,
'type': 'bla',
})
msg = await websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == wapi.TYPE_RESULT
assert not msg['success']
assert msg['error']['code'] == wapi.ERR_UNKNOWN_ERROR
|
|
# -*- coding: utf-8 -*-
"""This file contains BagMRU Windows Registry plugins (shellbags)."""
from dtfabric.runtime import data_maps as dtfabric_data_maps
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers.shared import shell_items
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import dtfabric_plugin
from plaso.parsers.winreg_plugins import interface
class BagMRUEventData(events.EventData):
"""BagMRU event data attribute container.
Attributes:
entries (str): most recently used (MRU) entries.
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'windows:registry:bagmru'
def __init__(self):
"""Initializes event data."""
super(BagMRUEventData, self).__init__(data_type=self.DATA_TYPE)
self.entries = None
self.key_path = None
class BagMRUWindowsRegistryPlugin(
dtfabric_plugin.DtFabricBaseWindowsRegistryPlugin):
"""Class that defines a BagMRU Windows Registry plugin."""
NAME = 'bagmru'
DATA_FORMAT = 'BagMRU (or ShellBags) Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\Shell\\BagMRU'),
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\ShellNoRoam\\'
'BagMRU'),
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Classes\\Local Settings\\Software\\'
'Microsoft\\Windows\\Shell\\BagMRU'),
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Classes\\Local Settings\\Software\\'
'Microsoft\\Windows\\ShellNoRoam\\BagMRU'),
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Local Settings\\Software\\Microsoft\\Windows\\'
'Shell\\BagMRU'),
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Local Settings\\Software\\Microsoft\\Windows\\'
'ShellNoRoam\\BagMRU')])
_DEFINITION_FILE = 'mru.yaml'
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_number, parent_path_segments,
codepage='cp1252'):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_number (int): entry number.
parent_path_segments (list[str]): parent shell item path segments.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
tuple[str, str]: path and upper path segment of the shell item or
None, None if not available.
"""
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'Missing MRUListEx entry value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
return None, None
if not value.DataIsBinaryData():
parser_mediator.ProduceExtractionWarning(
'Non-binary MRUListEx entry value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
return None, None
path = None
upper_path_segment = None
if value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, value.data,
parent_path_segments=parent_path_segments, codepage=codepage)
path = shell_items_parser.CopyToPath()
upper_path_segment = shell_items_parser.GetUpperPathSegment()
return path, upper_path_segment
def _ParseMRUListExValue(self, registry_key):
"""Parses the MRUListEx value in a given Registry key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
Returns:
mrulistex_entries: MRUListEx entries or None if not available.
"""
mrulistex_value = registry_key.GetValueByName('MRUListEx')
    # The key exists but does not contain a value named "MRUListEx".
if not mrulistex_value:
return None
mrulistex_entries_map = self._GetDataTypeMap('mrulistex_entries')
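    # The MRUListEx data is an array of 32-bit entry numbers; the context
    # supplies the data size so the map knows how many entries to read.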
context = dtfabric_data_maps.DataTypeMapContext(values={
'data_size': len(mrulistex_value.data)})
return self._ReadStructureFromByteStream(
mrulistex_value.data, 0, mrulistex_entries_map, context=context)
def _ParseSubKey(
self, parser_mediator, registry_key, parent_path_segments,
codepage='cp1252'):
"""Extract event objects from a MRUListEx Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
parent_path_segments (list[str]): parent shell item path segments.
codepage (Optional[str]): extended ASCII string codepage.
"""
try:
mrulistex = self._ParseMRUListExValue(registry_key)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse MRUListEx value with error: {0!s}'.format(exception))
return
if not mrulistex:
return
entries = []
entry_numbers = {}
found_terminator = False
for entry_index, entry_number in enumerate(mrulistex):
      # The MRU list is terminated with -1 (0xffffffff).
      if entry_number == -1:
        found_terminator = True
        continue
if found_terminator:
parser_mediator.ProduceExtractionWarning((
'found additional MRUListEx entries after terminator in key: '
'{0:s}.').format(registry_key.path))
# Only create one parser error per terminator.
found_terminator = False
path, upper_path_segment = self._ParseMRUListExEntryValue(
parser_mediator, registry_key, entry_number, parent_path_segments,
codepage=codepage)
entry_numbers[entry_number] = upper_path_segment
entry = 'Index: {0:d} [MRU Value {1:d}]: Shell item path: {2:s}'.format(
entry_index + 1, entry_number, path or 'N/A')
entries.append(entry)
event_data = BagMRUEventData()
event_data.entries = ' '.join(entries)
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
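    # Recurse into each numbered subkey, extending the parent shell item
    # path with the segment parsed for that MRU entry.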
for entry_number, path_segment in entry_numbers.items():
sub_key_name = '{0:d}'.format(entry_number)
sub_key = registry_key.GetSubkeyByName(sub_key_name)
if not sub_key:
parser_mediator.ProduceExtractionWarning(
'Missing BagMRU sub key: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
continue
parent_path_segments.append(path_segment)
self._ParseSubKey(
parser_mediator, sub_key, parent_path_segments, codepage=codepage)
parent_path_segments.pop()
# pylint: disable=arguments-differ
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
self._ParseSubKey(parser_mediator, registry_key, [], codepage=codepage)
winreg_parser.WinRegistryParser.RegisterPlugin(BagMRUWindowsRegistryPlugin)
|
|
# pylint: skip-file
from __future__ import absolute_import
import pytest
from collections import OrderedDict
import itertools
import time
from kafka.client_async import KafkaClient
from kafka.consumer.fetcher import (
CompletedFetch, ConsumerRecord, Fetcher, NoOffsetForPartitionError
)
from kafka.consumer.subscription_state import SubscriptionState
from kafka.future import Future
from kafka.metrics import Metrics
from kafka.protocol.fetch import FetchRequest, FetchResponse
from kafka.protocol.offset import OffsetResponse
from kafka.errors import (
StaleMetadata, LeaderNotAvailableError, NotLeaderForPartitionError,
UnknownTopicOrPartitionError, OffsetOutOfRangeError
)
from kafka.record.memory_records import MemoryRecordsBuilder, MemoryRecords
from kafka.structs import OffsetAndMetadata, TopicPartition
@pytest.fixture
def client(mocker):
return mocker.Mock(spec=KafkaClient(bootstrap_servers=(), api_version=(0, 9)))
@pytest.fixture
def subscription_state():
return SubscriptionState()
@pytest.fixture
def topic():
return 'foobar'
@pytest.fixture
def fetcher(client, subscription_state, topic):
subscription_state.subscribe(topics=[topic])
assignment = [TopicPartition(topic, i) for i in range(3)]
subscription_state.assign_from_subscribed(assignment)
for tp in assignment:
subscription_state.seek(tp, 0)
return Fetcher(client, subscription_state, Metrics())
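# Helper: build a serialized message set (message format v1) from
# (key, value, timestamp) tuples, as raw bytes usable in a CompletedFetch.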
def _build_record_batch(msgs, compression=0):
builder = MemoryRecordsBuilder(
magic=1, compression_type=0, batch_size=9999999)
for msg in msgs:
key, value, timestamp = msg
builder.append(key=key, value=value, timestamp=timestamp, headers=[])
builder.close()
return builder.buffer()
def test_send_fetches(fetcher, topic, mocker):
fetch_requests = [
FetchRequest[0](
-1, fetcher.config['fetch_max_wait_ms'],
fetcher.config['fetch_min_bytes'],
[(topic, [
(0, 0, fetcher.config['max_partition_fetch_bytes']),
(1, 0, fetcher.config['max_partition_fetch_bytes']),
])]),
FetchRequest[0](
-1, fetcher.config['fetch_max_wait_ms'],
fetcher.config['fetch_min_bytes'],
[(topic, [
(2, 0, fetcher.config['max_partition_fetch_bytes']),
])])
]
mocker.patch.object(fetcher, '_create_fetch_requests',
return_value=dict(enumerate(fetch_requests)))
ret = fetcher.send_fetches()
for node, request in enumerate(fetch_requests):
fetcher._client.send.assert_any_call(node, request, wakeup=False)
assert len(ret) == len(fetch_requests)
@pytest.mark.parametrize(("api_version", "fetch_version"), [
((0, 10, 1), 3),
((0, 10, 0), 2),
((0, 9), 1),
((0, 8), 0)
])
def test_create_fetch_requests(fetcher, mocker, api_version, fetch_version):
fetcher._client.in_flight_request_count.return_value = 0
fetcher.config['api_version'] = api_version
by_node = fetcher._create_fetch_requests()
requests = by_node.values()
assert all([isinstance(r, FetchRequest[fetch_version]) for r in requests])
def test_update_fetch_positions(fetcher, topic, mocker):
mocker.patch.object(fetcher, '_reset_offset')
partition = TopicPartition(topic, 0)
# unassigned partition
fetcher.update_fetch_positions([TopicPartition('fizzbuzz', 0)])
assert fetcher._reset_offset.call_count == 0
# fetchable partition (has offset, not paused)
fetcher.update_fetch_positions([partition])
assert fetcher._reset_offset.call_count == 0
# partition needs reset, no committed offset
fetcher._subscriptions.need_offset_reset(partition)
fetcher._subscriptions.assignment[partition].awaiting_reset = False
fetcher.update_fetch_positions([partition])
fetcher._reset_offset.assert_called_with(partition)
assert fetcher._subscriptions.assignment[partition].awaiting_reset is True
fetcher.update_fetch_positions([partition])
fetcher._reset_offset.assert_called_with(partition)
# partition needs reset, has committed offset
fetcher._reset_offset.reset_mock()
fetcher._subscriptions.need_offset_reset(partition)
fetcher._subscriptions.assignment[partition].awaiting_reset = False
fetcher._subscriptions.assignment[partition].committed = OffsetAndMetadata(123, b'')
mocker.patch.object(fetcher._subscriptions, 'seek')
fetcher.update_fetch_positions([partition])
assert fetcher._reset_offset.call_count == 0
fetcher._subscriptions.seek.assert_called_with(partition, 123)
def test__reset_offset(fetcher, mocker):
tp = TopicPartition("topic", 0)
fetcher._subscriptions.subscribe(topics="topic")
fetcher._subscriptions.assign_from_subscribed([tp])
fetcher._subscriptions.need_offset_reset(tp)
mocked = mocker.patch.object(fetcher, '_retrieve_offsets')
mocked.return_value = {tp: (1001, None)}
fetcher._reset_offset(tp)
assert not fetcher._subscriptions.assignment[tp].awaiting_reset
assert fetcher._subscriptions.assignment[tp].position == 1001
def test__send_offset_requests(fetcher, mocker):
tp = TopicPartition("topic_send_offset", 1)
mocked_send = mocker.patch.object(fetcher, "_send_offset_request")
send_futures = []
def send_side_effect(*args, **kw):
f = Future()
send_futures.append(f)
return f
mocked_send.side_effect = send_side_effect
mocked_leader = mocker.patch.object(
fetcher._client.cluster, "leader_for_partition")
# First we report unavailable leader 2 times different ways and later
# always as available
mocked_leader.side_effect = itertools.chain(
[None, -1], itertools.cycle([0]))
# Leader == None
fut = fetcher._send_offset_requests({tp: 0})
assert fut.failed()
assert isinstance(fut.exception, StaleMetadata)
assert not mocked_send.called
# Leader == -1
fut = fetcher._send_offset_requests({tp: 0})
assert fut.failed()
assert isinstance(fut.exception, LeaderNotAvailableError)
assert not mocked_send.called
# Leader == 0, send failed
fut = fetcher._send_offset_requests({tp: 0})
assert not fut.is_done
assert mocked_send.called
# Check that we bound the futures correctly to chain failure
send_futures.pop().failure(NotLeaderForPartitionError(tp))
assert fut.failed()
assert isinstance(fut.exception, NotLeaderForPartitionError)
# Leader == 0, send success
fut = fetcher._send_offset_requests({tp: 0})
assert not fut.is_done
assert mocked_send.called
# Check that we bound the futures correctly to chain success
send_futures.pop().success({tp: (10, 10000)})
assert fut.succeeded()
assert fut.value == {tp: (10, 10000)}
def test__send_offset_requests_multiple_nodes(fetcher, mocker):
tp1 = TopicPartition("topic_send_offset", 1)
tp2 = TopicPartition("topic_send_offset", 2)
tp3 = TopicPartition("topic_send_offset", 3)
tp4 = TopicPartition("topic_send_offset", 4)
mocked_send = mocker.patch.object(fetcher, "_send_offset_request")
send_futures = []
def send_side_effect(node_id, timestamps):
f = Future()
send_futures.append((node_id, timestamps, f))
return f
mocked_send.side_effect = send_side_effect
mocked_leader = mocker.patch.object(
fetcher._client.cluster, "leader_for_partition")
mocked_leader.side_effect = itertools.cycle([0, 1])
# -- All node succeeded case
tss = OrderedDict([(tp1, 0), (tp2, 0), (tp3, 0), (tp4, 0)])
fut = fetcher._send_offset_requests(tss)
assert not fut.is_done
assert mocked_send.call_count == 2
req_by_node = {}
second_future = None
for node, timestamps, f in send_futures:
req_by_node[node] = timestamps
if node == 0:
# Say tp3 does not have any messages so it's missing
f.success({tp1: (11, 1001)})
else:
second_future = f
assert req_by_node == {
0: {tp1: 0, tp3: 0},
1: {tp2: 0, tp4: 0}
}
# We only resolved 1 future so far, so result future is not yet ready
assert not fut.is_done
second_future.success({tp2: (12, 1002), tp4: (14, 1004)})
assert fut.succeeded()
assert fut.value == {tp1: (11, 1001), tp2: (12, 1002), tp4: (14, 1004)}
# -- First succeeded second not
del send_futures[:]
fut = fetcher._send_offset_requests(tss)
assert len(send_futures) == 2
send_futures[0][2].success({tp1: (11, 1001)})
send_futures[1][2].failure(UnknownTopicOrPartitionError(tp1))
assert fut.failed()
assert isinstance(fut.exception, UnknownTopicOrPartitionError)
# -- First fails second succeeded
del send_futures[:]
fut = fetcher._send_offset_requests(tss)
assert len(send_futures) == 2
send_futures[0][2].failure(UnknownTopicOrPartitionError(tp1))
send_futures[1][2].success({tp1: (11, 1001)})
assert fut.failed()
assert isinstance(fut.exception, UnknownTopicOrPartitionError)
def test__handle_offset_response(fetcher, mocker):
# Broker returns UnsupportedForMessageFormatError, will omit partition
fut = Future()
res = OffsetResponse[1]([
("topic", [(0, 43, -1, -1)]),
("topic", [(1, 0, 1000, 9999)])
])
fetcher._handle_offset_response(fut, res)
assert fut.succeeded()
assert fut.value == {TopicPartition("topic", 1): (9999, 1000)}
# Broker returns NotLeaderForPartitionError
fut = Future()
res = OffsetResponse[1]([
("topic", [(0, 6, -1, -1)]),
])
fetcher._handle_offset_response(fut, res)
assert fut.failed()
assert isinstance(fut.exception, NotLeaderForPartitionError)
# Broker returns UnknownTopicOrPartitionError
fut = Future()
res = OffsetResponse[1]([
("topic", [(0, 3, -1, -1)]),
])
fetcher._handle_offset_response(fut, res)
assert fut.failed()
assert isinstance(fut.exception, UnknownTopicOrPartitionError)
# Broker returns many errors and 1 result
# Will fail on 1st error and return
fut = Future()
res = OffsetResponse[1]([
("topic", [(0, 43, -1, -1)]),
("topic", [(1, 6, -1, -1)]),
("topic", [(2, 3, -1, -1)]),
("topic", [(3, 0, 1000, 9999)])
])
fetcher._handle_offset_response(fut, res)
assert fut.failed()
assert isinstance(fut.exception, NotLeaderForPartitionError)
def test_fetched_records(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
msgs = []
for i in range(10):
msgs.append((None, b"foo", None))
completed_fetch = CompletedFetch(
tp, 0, 0, [0, 100, _build_record_batch(msgs)],
mocker.MagicMock()
)
fetcher._completed_fetches.append(completed_fetch)
records, partial = fetcher.fetched_records()
assert tp in records
assert len(records[tp]) == len(msgs)
assert all(map(lambda x: isinstance(x, ConsumerRecord), records[tp]))
assert partial is False
@pytest.mark.parametrize(("fetch_request", "fetch_response", "num_partitions"), [
(
FetchRequest[0](
-1, 100, 100,
[('foo', [(0, 0, 1000),])]),
FetchResponse[0](
[("foo", [(0, 0, 1000, [(0, b'xxx'),])]),]),
1,
),
(
FetchRequest[1](
-1, 100, 100,
[('foo', [(0, 0, 1000), (1, 0, 1000),])]),
FetchResponse[1](
0,
[("foo", [
(0, 0, 1000, [(0, b'xxx'),]),
(1, 0, 1000, [(0, b'xxx'),]),
]),]),
2,
),
(
FetchRequest[2](
-1, 100, 100,
[('foo', [(0, 0, 1000),])]),
FetchResponse[2](
0, [("foo", [(0, 0, 1000, [(0, b'xxx'),])]),]),
1,
),
(
FetchRequest[3](
-1, 100, 100, 10000,
[('foo', [(0, 0, 1000),])]),
FetchResponse[3](
0, [("foo", [(0, 0, 1000, [(0, b'xxx'),])]),]),
1,
),
(
FetchRequest[4](
-1, 100, 100, 10000, 0,
[('foo', [(0, 0, 1000),])]),
FetchResponse[4](
0, [("foo", [(0, 0, 1000, 0, [], [(0, b'xxx'),])]),]),
1,
),
(
# This may only be used in broker-broker api calls
FetchRequest[5](
-1, 100, 100, 10000, 0,
[('foo', [(0, 0, 1000),])]),
FetchResponse[5](
0, [("foo", [(0, 0, 1000, 0, 0, [], [(0, b'xxx'),])]),]),
1,
),
])
def test__handle_fetch_response(fetcher, fetch_request, fetch_response, num_partitions):
fetcher._handle_fetch_response(fetch_request, time.time(), fetch_response)
assert len(fetcher._completed_fetches) == num_partitions
def test__unpack_message_set(fetcher):
fetcher.config['check_crcs'] = False
tp = TopicPartition('foo', 0)
messages = [
(None, b"a", None),
(None, b"b", None),
(None, b"c", None),
]
memory_records = MemoryRecords(_build_record_batch(messages))
records = list(fetcher._unpack_message_set(tp, memory_records))
assert len(records) == 3
assert all(map(lambda x: isinstance(x, ConsumerRecord), records))
assert records[0].value == b'a'
assert records[1].value == b'b'
assert records[2].value == b'c'
assert records[0].offset == 0
assert records[1].offset == 1
assert records[2].offset == 2
def test__message_generator(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
msgs = []
for i in range(10):
msgs.append((None, b"foo", None))
completed_fetch = CompletedFetch(
tp, 0, 0, [0, 100, _build_record_batch(msgs)],
mocker.MagicMock()
)
fetcher._completed_fetches.append(completed_fetch)
for i in range(10):
msg = next(fetcher)
assert isinstance(msg, ConsumerRecord)
assert msg.offset == i
assert msg.value == b'foo'
def test__parse_fetched_data(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
msgs = []
for i in range(10):
msgs.append((None, b"foo", None))
completed_fetch = CompletedFetch(
tp, 0, 0, [0, 100, _build_record_batch(msgs)],
mocker.MagicMock()
)
partition_record = fetcher._parse_fetched_data(completed_fetch)
assert isinstance(partition_record, fetcher.PartitionRecords)
assert len(partition_record) == 10
def test__parse_fetched_data__paused(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
msgs = []
for i in range(10):
msgs.append((None, b"foo", None))
completed_fetch = CompletedFetch(
tp, 0, 0, [0, 100, _build_record_batch(msgs)],
mocker.MagicMock()
)
fetcher._subscriptions.pause(tp)
partition_record = fetcher._parse_fetched_data(completed_fetch)
assert partition_record is None
def test__parse_fetched_data__stale_offset(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
msgs = []
for i in range(10):
msgs.append((None, b"foo", None))
completed_fetch = CompletedFetch(
tp, 10, 0, [0, 100, _build_record_batch(msgs)],
mocker.MagicMock()
)
partition_record = fetcher._parse_fetched_data(completed_fetch)
assert partition_record is None
def test__parse_fetched_data__not_leader(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
completed_fetch = CompletedFetch(
tp, 0, 0, [NotLeaderForPartitionError.errno, -1, None],
mocker.MagicMock()
)
partition_record = fetcher._parse_fetched_data(completed_fetch)
assert partition_record is None
fetcher._client.cluster.request_update.assert_called_with()
def test__parse_fetched_data__unknown_tp(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
completed_fetch = CompletedFetch(
tp, 0, 0, [UnknownTopicOrPartitionError.errno, -1, None],
mocker.MagicMock()
)
partition_record = fetcher._parse_fetched_data(completed_fetch)
assert partition_record is None
fetcher._client.cluster.request_update.assert_called_with()
def test__parse_fetched_data__out_of_range(fetcher, topic, mocker):
fetcher.config['check_crcs'] = False
tp = TopicPartition(topic, 0)
completed_fetch = CompletedFetch(
tp, 0, 0, [OffsetOutOfRangeError.errno, -1, None],
mocker.MagicMock()
)
partition_record = fetcher._parse_fetched_data(completed_fetch)
assert partition_record is None
assert fetcher._subscriptions.assignment[tp].awaiting_reset is True
def test_partition_records_offset():
"""Test that compressed messagesets are handle correctly
when fetch offset is in the middle of the message list
"""
batch_start = 120
batch_end = 130
fetch_offset = 123
tp = TopicPartition('foo', 0)
messages = [ConsumerRecord(tp.topic, tp.partition, i,
None, None, 'key', 'value', [], 'checksum', 0, 0, -1)
for i in range(batch_start, batch_end)]
records = Fetcher.PartitionRecords(fetch_offset, None, messages)
assert len(records) > 0
msgs = records.take(1)
assert msgs[0].offset == fetch_offset
assert records.fetch_offset == fetch_offset + 1
msgs = records.take(2)
assert len(msgs) == 2
assert len(records) > 0
records.discard()
assert len(records) == 0
def test_partition_records_empty():
records = Fetcher.PartitionRecords(0, None, [])
assert len(records) == 0
def test_partition_records_no_fetch_offset():
batch_start = 0
batch_end = 100
fetch_offset = 123
tp = TopicPartition('foo', 0)
messages = [ConsumerRecord(tp.topic, tp.partition, i,
None, None, 'key', 'value', None, 'checksum', 0, 0, -1)
for i in range(batch_start, batch_end)]
records = Fetcher.PartitionRecords(fetch_offset, None, messages)
assert len(records) == 0
def test_partition_records_compacted_offset():
"""Test that messagesets are handle correctly
when the fetch offset points to a message that has been compacted
"""
batch_start = 0
batch_end = 100
fetch_offset = 42
tp = TopicPartition('foo', 0)
messages = [ConsumerRecord(tp.topic, tp.partition, i,
None, None, 'key', 'value', None, 'checksum', 0, 0, -1)
for i in range(batch_start, batch_end) if i != fetch_offset]
records = Fetcher.PartitionRecords(fetch_offset, None, messages)
assert len(records) == batch_end - fetch_offset - 1
msgs = records.take(1)
assert msgs[0].offset == fetch_offset + 1
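# A minimal illustrative sketch (not part of the original suite) that reuses
# the ConsumerRecord/PartitionRecords signatures from the tests above: taking
# exactly the number of remaining records drains the collection.
def test_partition_records_take_all_sketch():
    tp = TopicPartition('foo', 0)
    messages = [ConsumerRecord(tp.topic, tp.partition, i,
                               None, None, 'key', 'value', None, 'checksum', 0, 0, -1)
                for i in range(5)]
    records = Fetcher.PartitionRecords(0, None, messages)
    msgs = records.take(5)
    assert len(msgs) == 5
    assert [m.offset for m in msgs] == list(range(5))
    assert len(records) == 0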
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import operator
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.text import normalize_newlines # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import functions
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
class SelectProjectUserAction(workflows.Action):
project_id = forms.ChoiceField(label=_("Project"))
user_id = forms.ChoiceField(label=_("User"))
def __init__(self, request, *args, **kwargs):
super(SelectProjectUserAction, self).__init__(request, *args, **kwargs)
# Set our project choices
projects = [(tenant.id, tenant.name)
for tenant in request.user.authorized_tenants]
self.fields['project_id'].choices = projects
# Set our user options
users = [(request.user.id, request.user.username)]
self.fields['user_id'].choices = users
class Meta:
name = _("Project & User")
# Unusable permission so this is always hidden. However, we
# keep this step in the workflow for validation/verification purposes.
permissions = ("!",)
class SelectProjectUser(workflows.Step):
action_class = SelectProjectUserAction
contributes = ("project_id", "user_id")
class SetInstanceDetailsAction(workflows.Action):
availability_zone = forms.ChoiceField(label=_("Availability Zone"),
required=False)
name = forms.CharField(label=_("Instance Name"),
max_length=255)
flavor = forms.ChoiceField(label=_("Flavor"),
help_text=_("Size of image to launch."))
count = forms.IntegerField(label=_("Instance Count"),
min_value=1,
initial=1,
help_text=_("Number of instances to launch."))
source_type = forms.ChoiceField(label=_("Instance Boot Source"),
help_text=_("Choose Your Boot Source "
"Type."))
instance_snapshot_id = forms.ChoiceField(label=_("Instance Snapshot"),
required=False)
volume_id = forms.ChoiceField(label=_("Volume"), required=False)
volume_snapshot_id = forms.ChoiceField(label=_("Volume Snapshot"),
required=False)
image_id = forms.ChoiceField(
label=_("Image Name"),
required=False,
widget=forms.SelectWidget(
data_attrs=('volume_size',),
transform=lambda x: ("%s (%s)" % (x.name,
filesizeformat(x.bytes)))))
volume_size = forms.IntegerField(label=_("Device size (GB)"),
initial=1,
min_value=0,
required=False,
help_text=_("Volume size in gigabytes "
"(integer value)."))
device_name = forms.CharField(label=_("Device Name"),
required=False,
initial="vda",
help_text=_("Volume mount point (e.g. 'vda' "
"mounts at '/dev/vda'). Leave "
"this field blank to let the "
"system choose a device name "
"for you."))
delete_on_terminate = forms.BooleanField(label=_("Delete on Terminate"),
initial=False,
required=False,
help_text=_("Delete volume on "
"instance terminate"))
class Meta:
name = _("Details")
help_text_template = ("project/instances/"
"_launch_details_help.html")
def __init__(self, request, context, *args, **kwargs):
self._init_images_cache()
self.request = request
self.context = context
super(SetInstanceDetailsAction, self).__init__(
request, context, *args, **kwargs)
# Hide the device field if the hypervisor doesn't support it.
if not nova.can_set_mount_point():
self.fields['device_name'].widget = forms.widgets.HiddenInput()
source_type_choices = [
('', _("Select source")),
("image_id", _("Boot from image")),
("instance_snapshot_id", _("Boot from snapshot")),
]
if base.is_service_enabled(request, 'volume'):
source_type_choices.append(("volume_id", _("Boot from volume")))
try:
if api.nova.extension_supported("BlockDeviceMappingV2Boot",
request):
source_type_choices.append(
("volume_image_id",
_("Boot from image (creates a new volume)")))
except Exception:
exceptions.handle(request, _('Unable to retrieve extensions '
'information.'))
source_type_choices.append(
("volume_snapshot_id",
_("Boot from volume snapshot (creates a new volume)")))
self.fields['source_type'].choices = source_type_choices
def clean(self):
cleaned_data = super(SetInstanceDetailsAction, self).clean()
count = cleaned_data.get('count', 1)
# Prevent launching more instances than the quota allows
usages = quotas.tenant_quota_usages(self.request)
available_count = usages['instances']['available']
if available_count < count:
error_message = ungettext_lazy('The requested instance '
'cannot be launched as you only '
'have %(avail)i of your quota '
'available. ',
'The requested %(req)i instances '
'cannot be launched as you only '
'have %(avail)i of your quota '
'available.',
count)
params = {'req': count,
'avail': available_count}
raise forms.ValidationError(error_message % params)
try:
flavor_id = cleaned_data.get('flavor')
# We want to retrieve details for a given flavor,
# however flavor_list uses a memoized decorator
# so it is used instead of flavor_get to reduce the number
# of API calls.
flavors = instance_utils.flavor_list(self.request)
flavor = [x for x in flavors if x.id == flavor_id][0]
except IndexError:
flavor = None
count_error = []
# Validate cores and ram.
available_cores = usages['cores']['available']
if flavor and available_cores < count * flavor.vcpus:
count_error.append(_("Cores(Available: %(avail)s, "
"Requested: %(req)s)")
% {'avail': available_cores,
'req': count * flavor.vcpus})
available_ram = usages['ram']['available']
if flavor and available_ram < count * flavor.ram:
count_error.append(_("RAM(Available: %(avail)s, "
"Requested: %(req)s)")
% {'avail': available_ram,
'req': count * flavor.ram})
if count_error:
value_str = ", ".join(count_error)
msg = (_('The requested instance cannot be launched. '
'The following requested resource(s) exceed '
'quota(s): %s.') % value_str)
if count == 1:
self._errors['flavor'] = self.error_class([msg])
else:
self._errors['count'] = self.error_class([msg])
# Validate our instance source.
source_type = self.data.get('source_type', None)
if source_type in ('image_id', 'volume_image_id'):
if source_type == 'volume_image_id':
volume_size = self.data.get('volume_size', None)
if not volume_size:
msg = _("You must set volume size")
self._errors['volume_size'] = self.error_class([msg])
if float(volume_size) <= 0:
msg = _("Volume size must be greater than 0")
self._errors['volume_size'] = self.error_class([msg])
if not cleaned_data.get('image_id'):
msg = _("You must select an image.")
self._errors['image_id'] = self.error_class([msg])
else:
                # Prevent launching an image that needs more resources
                # than the selected flavor provides.
try:
image_id = cleaned_data.get('image_id')
# We want to retrieve details for a given image,
# however get_available_images uses a cache of image list,
# so it is used instead of image_get to reduce the number
# of API calls.
images = image_utils.get_available_images(
self.request,
self.context.get('project_id'),
self._images_cache)
image = [x for x in images if x.id == image_id][0]
except IndexError:
image = None
if image and flavor:
props_mapping = (("min_ram", "ram"), ("min_disk", "disk"))
for iprop, fprop in props_mapping:
if getattr(image, iprop) > 0 and \
getattr(image, iprop) > getattr(flavor, fprop):
msg = (_("The flavor '%(flavor)s' is too small "
"for requested image.\n"
"Minimum requirements: "
"%(min_ram)s MB of RAM and "
"%(min_disk)s GB of Root Disk.") %
{'flavor': flavor.name,
'min_ram': image.min_ram,
'min_disk': image.min_disk})
self._errors['image_id'] = self.error_class([msg])
break # Not necessary to continue the tests.
volume_size = cleaned_data.get('volume_size')
if volume_size and source_type == 'volume_image_id':
volume_size = int(volume_size)
img_gigs = functions.bytes_to_gigabytes(image.size)
smallest_size = max(img_gigs, image.min_disk)
if volume_size < smallest_size:
msg = (_("The Volume size is too small for the"
" '%(image_name)s' image and has to be"
" greater than or equal to "
"'%(smallest_size)d' GB.") %
{'image_name': image.name,
'smallest_size': smallest_size})
self._errors['volume_size'] = self.error_class(
[msg])
elif source_type == 'instance_snapshot_id':
if not cleaned_data['instance_snapshot_id']:
msg = _("You must select a snapshot.")
self._errors['instance_snapshot_id'] = self.error_class([msg])
elif source_type == 'volume_id':
if not cleaned_data.get('volume_id'):
msg = _("You must select a volume.")
self._errors['volume_id'] = self.error_class([msg])
# Prevent launching multiple instances with the same volume.
# TODO(gabriel): is it safe to launch multiple instances with
# a snapshot since it should be cloned to new volumes?
if count > 1:
msg = _('Launching multiple instances is only supported for '
'images and instance snapshots.')
raise forms.ValidationError(msg)
elif source_type == 'volume_snapshot_id':
if not cleaned_data.get('volume_snapshot_id'):
msg = _("You must select a snapshot.")
self._errors['volume_snapshot_id'] = self.error_class([msg])
return cleaned_data
def populate_flavor_choices(self, request, context):
flavors = instance_utils.flavor_list(request)
if flavors:
return instance_utils.sort_flavor_list(request, flavors)
return []
def populate_availability_zone_choices(self, request, context):
try:
zones = api.nova.availability_zone_list(request)
except Exception:
zones = []
exceptions.handle(request,
_('Unable to retrieve availability zones.'))
zone_list = [(zone.zoneName, zone.zoneName)
for zone in zones if zone.zoneState['available']]
zone_list.sort()
if not zone_list:
zone_list.insert(0, ("", _("No availability zones found")))
elif len(zone_list) > 1:
zone_list.insert(0, ("", _("Any Availability Zone")))
return zone_list
def get_help_text(self, extra_context=None):
extra = extra_context or {}
try:
extra['usages'] = api.nova.tenant_absolute_limits(self.request)
extra['usages_json'] = json.dumps(extra['usages'])
flavors = json.dumps([f._info for f in
instance_utils.flavor_list(self.request)])
extra['flavors'] = flavors
images = image_utils.get_available_images(
self.request, self.initial['project_id'], self._images_cache)
if images is not None:
attrs = [{'id': i.id,
'min_disk': getattr(i, 'min_disk', 0),
'min_ram': getattr(i, 'min_ram', 0)}
for i in images]
extra['images'] = json.dumps(attrs)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
return super(SetInstanceDetailsAction, self).get_help_text(extra)
def _init_images_cache(self):
if not hasattr(self, '_images_cache'):
self._images_cache = {}
def _get_volume_display_name(self, volume):
if hasattr(volume, "volume_id"):
vol_type = "snap"
visible_label = _("Snapshot")
else:
vol_type = "vol"
visible_label = _("Volume")
return (("%s:%s" % (volume.id, vol_type)),
(_("%(name)s - %(size)s GB (%(label)s)") %
{'name': volume.name,
'size': volume.size,
'label': visible_label}))
def populate_image_id_choices(self, request, context):
choices = []
images = image_utils.get_available_images(request,
context.get('project_id'),
self._images_cache)
for image in images:
image.bytes = image.size
image.volume_size = max(
image.min_disk, functions.bytes_to_gigabytes(image.bytes))
choices.append((image.id, image))
if context.get('image_id') == image.id and \
'volume_size' not in context:
context['volume_size'] = image.volume_size
if choices:
choices.sort(key=lambda c: c[1].name)
choices.insert(0, ("", _("Select Image")))
else:
choices.insert(0, ("", _("No images available")))
return choices
def populate_instance_snapshot_id_choices(self, request, context):
images = image_utils.get_available_images(request,
context.get('project_id'),
self._images_cache)
choices = [(image.id, image.name)
for image in images
if image.properties.get("image_type", '') == "snapshot"]
if choices:
choices.sort(key=operator.itemgetter(1))
choices.insert(0, ("", _("Select Instance Snapshot")))
else:
choices.insert(0, ("", _("No snapshots available")))
return choices
def populate_volume_id_choices(self, request, context):
try:
volumes = [self._get_volume_display_name(v)
for v in cinder.volume_list(self.request)
if (v.status == api.cinder.VOLUME_STATE_AVAILABLE
and v.bootable == 'true')]
except Exception:
volumes = []
exceptions.handle(self.request,
_('Unable to retrieve list of volumes.'))
if volumes:
volumes.insert(0, ("", _("Select Volume")))
else:
volumes.insert(0, ("", _("No volumes available")))
return volumes
def populate_volume_snapshot_id_choices(self, request, context):
try:
snapshots = cinder.volume_snapshot_list(self.request)
snapshots = [self._get_volume_display_name(s) for s in snapshots
if s.status == api.cinder.VOLUME_STATE_AVAILABLE]
except Exception:
snapshots = []
exceptions.handle(self.request,
_('Unable to retrieve list of volume '
'snapshots.'))
if snapshots:
snapshots.insert(0, ("", _("Select Volume Snapshot")))
else:
snapshots.insert(0, ("", _("No volume snapshots available")))
return snapshots
class SetInstanceDetails(workflows.Step):
action_class = SetInstanceDetailsAction
depends_on = ("project_id", "user_id")
contributes = ("source_type", "source_id",
"availability_zone", "name", "count", "flavor",
"device_name", # Can be None for an image.
"delete_on_terminate")
def prepare_action_context(self, request, context):
if 'source_type' in context and 'source_id' in context:
context[context['source_type']] = context['source_id']
return context
def contribute(self, data, context):
context = super(SetInstanceDetails, self).contribute(data, context)
# Allow setting the source dynamically.
if ("source_type" in context and "source_id" in context
and context["source_type"] not in context):
context[context["source_type"]] = context["source_id"]
# Translate form input to context for source values.
if "source_type" in data:
if data["source_type"] in ["image_id", "volume_image_id"]:
context["source_id"] = data.get("image_id", None)
else:
context["source_id"] = data.get(data["source_type"], None)
if "volume_size" in data:
context["volume_size"] = data["volume_size"]
return context
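# A standalone sketch (plain dicts, no workflow machinery; the helper name is
# hypothetical) of the source_type -> source_id translation performed by
# SetInstanceDetails.contribute() above: image-backed sources always map the
# image_id field, every other source maps the field named by source_type.
def _source_context_sketch(data):
    context = {}
    if "source_type" in data:
        context["source_type"] = data["source_type"]
        if data["source_type"] in ["image_id", "volume_image_id"]:
            context["source_id"] = data.get("image_id", None)
        else:
            context["source_id"] = data.get(data["source_type"], None)
    return context
# e.g. _source_context_sketch({'source_type': 'volume_id', 'volume_id': 'abc'})
# returns {'source_type': 'volume_id', 'source_id': 'abc'}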
KEYPAIR_IMPORT_URL = "horizon:project:access_and_security:keypairs:import"
class SetAccessControlsAction(workflows.Action):
keypair = forms.DynamicChoiceField(label=_("Key Pair"),
required=False,
help_text=_("Key pair to use for "
"authentication."),
add_item_link=KEYPAIR_IMPORT_URL)
admin_pass = forms.RegexField(
label=_("Admin Pass"),
required=False,
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_admin_pass = forms.CharField(
label=_("Confirm Admin Pass"),
required=False,
widget=forms.PasswordInput(render_value=False))
groups = forms.MultipleChoiceField(label=_("Security Groups"),
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
help_text=_("Launch instance in these "
"security groups."))
class Meta:
name = _("Access & Security")
help_text = _("Control access to your instance via key pairs, "
"security groups, and other mechanisms.")
def __init__(self, request, *args, **kwargs):
super(SetAccessControlsAction, self).__init__(request, *args, **kwargs)
if not api.nova.can_set_server_password():
del self.fields['admin_pass']
del self.fields['confirm_admin_pass']
def populate_keypair_choices(self, request, context):
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except Exception:
keypair_list = []
exceptions.handle(request,
_('Unable to retrieve key pairs.'))
if keypair_list:
if len(keypair_list) == 1:
self.fields['keypair'].initial = keypair_list[0][0]
keypair_list.insert(0, ("", _("Select a key pair")))
else:
keypair_list = (("", _("No key pairs available")),)
return keypair_list
def populate_groups_choices(self, request, context):
try:
groups = api.network.security_group_list(request)
security_group_list = [(sg.name, sg.name) for sg in groups]
except Exception:
exceptions.handle(request,
_('Unable to retrieve list of security groups'))
security_group_list = []
return security_group_list
def clean(self):
'''Check to make sure password fields match.'''
cleaned_data = super(SetAccessControlsAction, self).clean()
if 'admin_pass' in cleaned_data:
if cleaned_data['admin_pass'] != cleaned_data.get(
'confirm_admin_pass', None):
raise forms.ValidationError(_('Passwords do not match.'))
return cleaned_data
class SetAccessControls(workflows.Step):
action_class = SetAccessControlsAction
depends_on = ("project_id", "user_id")
contributes = ("keypair_id", "security_group_ids",
"admin_pass", "confirm_admin_pass")
def contribute(self, data, context):
if data:
post = self.workflow.request.POST
context['security_group_ids'] = post.getlist("groups")
context['keypair_id'] = data.get("keypair", "")
context['admin_pass'] = data.get("admin_pass", "")
context['confirm_admin_pass'] = data.get("confirm_admin_pass", "")
return context
class CustomizeAction(workflows.Action):
class Meta:
name = _("Post-Creation")
help_text_template = ("project/instances/"
"_launch_customize_help.html")
source_choices = [('raw', _('Direct Input')),
('file', _('File'))]
attributes = {'class': 'switchable', 'data-slug': 'scriptsource'}
script_source = forms.ChoiceField(label=_('Customization Script Source'),
choices=source_choices,
widget=forms.Select(attrs=attributes))
script_help = _("A script or set of commands to be executed after the "
"instance has been built (max 16kb).")
script_upload = forms.FileField(
label=_('Script File'),
help_text=script_help,
widget=forms.FileInput(attrs={
'class': 'switched',
'data-switch-on': 'scriptsource',
'data-scriptsource-file': _('Script File')}),
required=False)
script_data = forms.CharField(
label=_('Script Data'),
help_text=script_help,
widget=forms.widgets.Textarea(attrs={
'class': 'switched',
'data-switch-on': 'scriptsource',
'data-scriptsource-raw': _('Script Data')}),
required=False)
def __init__(self, *args):
super(CustomizeAction, self).__init__(*args)
def clean(self):
cleaned = super(CustomizeAction, self).clean()
files = self.request.FILES
script = self.clean_uploaded_files('script', files)
if script is not None:
cleaned['script_data'] = script
return cleaned
def clean_uploaded_files(self, prefix, files):
upload_str = prefix + "_upload"
has_upload = upload_str in files
if has_upload:
upload_file = files[upload_str]
log_script_name = upload_file.name
            LOG.info('got upload %s', log_script_name)
if upload_file._size > 16 * 1024: # 16kb
msg = _('File exceeds maximum size (16kb)')
raise forms.ValidationError(msg)
else:
script = upload_file.read()
if script != "":
try:
normalize_newlines(script)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
return script
else:
return None
class PostCreationStep(workflows.Step):
action_class = CustomizeAction
contributes = ("script_data",)
class SetNetworkAction(workflows.Action):
network = forms.MultipleChoiceField(label=_("Networks"),
widget=forms.CheckboxSelectMultiple(),
error_messages={
'required': _(
"At least one network must"
" be specified.")},
help_text=_("Launch instance with"
" these networks"))
if api.neutron.is_port_profiles_supported():
widget = None
else:
widget = forms.HiddenInput()
profile = forms.ChoiceField(label=_("Policy Profiles"),
required=False,
widget=widget,
help_text=_("Launch instance with "
"this policy profile"))
def __init__(self, request, *args, **kwargs):
super(SetNetworkAction, self).__init__(request, *args, **kwargs)
network_list = self.fields["network"].choices
if len(network_list) == 1:
self.fields['network'].initial = [network_list[0][0]]
if api.neutron.is_port_profiles_supported():
self.fields['profile'].choices = (
self.get_policy_profile_choices(request))
class Meta:
name = _("Networking")
permissions = ('openstack.services.network',)
help_text = _("Select networks for your instance.")
def populate_network_choices(self, request, context):
network_list = []
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(request, tenant_id)
for n in networks:
n.set_id_as_name_if_empty()
network_list.append((n.id, n.name))
            network_list.sort(key=lambda obj: obj[1])
except Exception:
exceptions.handle(request,
_('Unable to retrieve networks.'))
return network_list
def get_policy_profile_choices(self, request):
profile_choices = [('', _("Select a profile"))]
for profile in self._get_profiles(request, 'policy'):
profile_choices.append((profile.id, profile.name))
return profile_choices
def _get_profiles(self, request, type_p):
profiles = []
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
return profiles
class SetNetwork(workflows.Step):
action_class = SetNetworkAction
    # Disable the template drag/drop only when port profiles are in use,
    # until the issue with drag/drop affecting profile_id detection is
    # fixed.
if api.neutron.is_port_profiles_supported():
contributes = ("network_id", "profile_id",)
else:
template_name = "project/instances/_update_networks.html"
contributes = ("network_id",)
def contribute(self, data, context):
if data:
networks = self.workflow.request.POST.getlist("network")
# If no networks are explicitly specified, network list
# contains an empty string, so remove it.
networks = [n for n in networks if n != '']
if networks:
context['network_id'] = networks
if api.neutron.is_port_profiles_supported():
context['profile_id'] = data.get('profile', None)
return context
class SetAdvancedAction(workflows.Action):
disk_config = forms.ChoiceField(
label=_("Disk Partition"), required=False,
help_text=_("Automatic: The entire disk is a single partition and "
"automatically resizes. Manual: Results in faster build "
"times but requires manual partitioning."))
config_drive = forms.BooleanField(
label=_("Configuration Drive"),
required=False, help_text=_("Configure OpenStack to write metadata to "
"a special configuration drive that "
"attaches to the instance when it boots."))
def __init__(self, request, context, *args, **kwargs):
super(SetAdvancedAction, self).__init__(request, context,
*args, **kwargs)
try:
if not api.nova.extension_supported("DiskConfig", request):
del self.fields['disk_config']
else:
# Set our disk_config choices
config_choices = [("AUTO", _("Automatic")),
("MANUAL", _("Manual"))]
self.fields['disk_config'].choices = config_choices
# Only show the Config Drive option for the Launch Instance
# workflow (not Resize Instance) and only if the extension
# is supported.
if context.get('workflow_slug') != 'launch_instance' or (
not api.nova.extension_supported("ConfigDrive", request)):
del self.fields['config_drive']
except Exception:
exceptions.handle(request, _('Unable to retrieve extensions '
'information.'))
class Meta:
name = _("Advanced Options")
help_text_template = ("project/instances/"
"_launch_advanced_help.html")
class SetAdvanced(workflows.Step):
action_class = SetAdvancedAction
contributes = ("disk_config", "config_drive",)
def prepare_action_context(self, request, context):
context = super(SetAdvanced, self).prepare_action_context(request,
context)
# Add the workflow slug to the context so that we can tell which
# workflow is being used when creating the action. This step is
# used by both the Launch Instance and Resize Instance workflows.
context['workflow_slug'] = self.workflow.slug
return context
class LaunchInstance(workflows.Workflow):
slug = "launch_instance"
name = _("Launch Instance")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:instances:index"
multipart = True
default_steps = (SelectProjectUser,
SetInstanceDetails,
SetAccessControls,
SetNetwork,
PostCreationStep,
SetAdvanced)
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
count = self.context.get('count', 1)
if int(count) > 1:
return message % {"count": _("%s instances") % count,
"name": name}
else:
return message % {"count": _("instance"), "name": name}
@sensitive_variables('context')
def handle(self, request, context):
custom_script = context.get('script_data', '')
dev_mapping_1 = None
dev_mapping_2 = None
image_id = ''
# Determine volume mapping options
source_type = context.get('source_type', None)
if source_type in ['image_id', 'instance_snapshot_id']:
image_id = context['source_id']
elif source_type in ['volume_id', 'volume_snapshot_id']:
dev_mapping_1 = {context['device_name']:
'%s::%s' %
(context['source_id'],
int(bool(context['delete_on_terminate'])))}
elif source_type == 'volume_image_id':
device_name = context.get('device_name', '').strip() or None
dev_mapping_2 = [
{'device_name': device_name, # None auto-selects device
'source_type': 'image',
'destination_type': 'volume',
'delete_on_termination':
int(bool(context['delete_on_terminate'])),
'uuid': context['source_id'],
'boot_index': '0',
'volume_size': context['volume_size']
}
]
netids = context.get('network_id', None)
if netids:
nics = [{"net-id": netid, "v4-fixed-ip": ""}
for netid in netids]
else:
nics = None
avail_zone = context.get('availability_zone', None)
        # Create a port with the network name and port profile for use with
        # the plugin supporting port profiles:
        # neutron port-create <Network name> --n1kv:profile <Port Profile ID>
        # for net_id in context['network_id']:
        # HACK: for now, use only the first network.
if api.neutron.is_port_profiles_supported():
net_id = context['network_id'][0]
LOG.debug("Horizon->Create Port with %(netid)s %(profile_id)s",
{'netid': net_id, 'profile_id': context['profile_id']})
port = None
try:
port = api.neutron.port_create(
request, net_id, policy_profile_id=context['profile_id'])
except Exception:
msg = (_('Port not created for profile-id (%s).') %
context['profile_id'])
exceptions.handle(request, msg)
if port and port.id:
nics = [{"port-id": port.id}]
try:
api.nova.server_create(request,
context['name'],
image_id,
context['flavor'],
context['keypair_id'],
normalize_newlines(custom_script),
context['security_group_ids'],
block_device_mapping=dev_mapping_1,
block_device_mapping_v2=dev_mapping_2,
nics=nics,
availability_zone=avail_zone,
instance_count=int(context['count']),
admin_pass=context['admin_pass'],
disk_config=context.get('disk_config'),
config_drive=context.get('config_drive'))
return True
except Exception:
exceptions.handle(request)
return False
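# A standalone sketch (plain Python, no Django/Nova imports; the helper name is
# hypothetical) of the block-device-mapping shapes that LaunchInstance.handle()
# above passes to nova.server_create() for each boot source.
def _dev_mapping_sketch(source_type, source_id, device_name='vda',
                        delete_on_terminate=False, volume_size=1):
    if source_type in ('volume_id', 'volume_snapshot_id'):
        # Legacy block_device_mapping, e.g. {'vda': '<uuid>::0'}
        return {device_name: '%s::%s' % (source_id,
                                         int(bool(delete_on_terminate)))}
    if source_type == 'volume_image_id':
        # block_device_mapping_v2: boot a new volume created from the image.
        return [{'device_name': device_name or None,
                 'source_type': 'image',
                 'destination_type': 'volume',
                 'delete_on_termination': int(bool(delete_on_terminate)),
                 'uuid': source_id,
                 'boot_index': '0',
                 'volume_size': volume_size}]
    # image_id / instance_snapshot_id boot directly from the image id instead.
    return None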
|
|
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and reacting to changes.
"""
# pylint: disable=unused-import, too-many-lines
import asyncio
from concurrent.futures import ThreadPoolExecutor
import enum
import logging
import os
import re
import signal
import sys
import threading
from types import MappingProxyType
from typing import Optional, Any, Callable, List # NOQA
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.const import (
ATTR_DOMAIN, ATTR_FRIENDLY_NAME, ATTR_NOW, ATTR_SERVICE,
ATTR_SERVICE_CALL_ID, ATTR_SERVICE_DATA, EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_EXECUTED, EVENT_SERVICE_REGISTERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL, RESTART_EXIT_CODE,
SERVICE_HOMEASSISTANT_RESTART, SERVICE_HOMEASSISTANT_STOP, __version__)
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError, ShuttingDown)
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
import homeassistant.util as util
import homeassistant.util.dt as dt_util
import homeassistant.util.location as location
from homeassistant.util.unit_system import UnitSystem, METRIC_SYSTEM # NOQA
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
DOMAIN = 'homeassistant'
# How often the time_changed event should fire
TIMER_INTERVAL = 1 # seconds
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Pattern for validating entity IDs (format: <domain>.<entity>)
ENTITY_ID_PATTERN = re.compile(r"^(\w+)\.(\w+)$")
# Size of the executor pool
EXECUTOR_POOL_SIZE = 10
# AsyncHandler for logging
DATA_ASYNCHANDLER = 'log_asynchandler'
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> List[str]:
"""Split a state entity_id into domain, object_id."""
return entity_id.split(".", 1)
def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format."""
return ENTITY_ID_PATTERN.match(entity_id) is not None
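# Illustrative sketch only (not part of the module API): how the two helpers
# above behave for a typical "<domain>.<object_id>" entity id.
def _entity_id_helpers_example():
    """Sketch: valid_entity_id() and split_entity_id() on sample ids."""
    assert valid_entity_id('light.kitchen')
    assert not valid_entity_id('kitchen')  # missing the domain separator
    assert split_entity_id('light.kitchen') == ['light', 'kitchen']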
def callback(func: Callable[..., None]) -> Callable[..., None]:
"""Annotation to mark method as safe to call from within the event loop."""
# pylint: disable=protected-access
func._hass_callback = True
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return '_hass_callback' in func.__dict__
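# Illustrative sketch only: @callback just sets a marker attribute on the
# function, which is_callback() then looks up (handler names are hypothetical).
def _callback_marker_example():
    """Sketch: one handler marked as loop-safe, the other left plain."""
    @callback
    def loop_safe_handler(event):
        pass
    def plain_handler(event):
        pass
    assert is_callback(loop_safe_handler)
    assert not is_callback(plain_handler)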
class CoreState(enum.Enum):
"""Represent the current state of Home Assistant."""
not_running = 'NOT_RUNNING'
starting = 'STARTING'
running = 'RUNNING'
stopping = 'STOPPING'
def __str__(self) -> str:
"""Return the event."""
return self.value
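# Illustrative sketch only: CoreState members stringify to their raw values.
def _core_state_example():
    """Sketch: str() of CoreState members."""
    assert str(CoreState.running) == 'RUNNING'
    assert str(CoreState.not_running) == 'NOT_RUNNING'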
class HomeAssistant(object):
"""Root object of the Home Assistant home automation."""
def __init__(self, loop=None):
"""Initialize new Home Assistant object."""
if sys.platform == 'win32':
self.loop = loop or asyncio.ProactorEventLoop()
else:
self.loop = loop or asyncio.get_event_loop()
self.executor = ThreadPoolExecutor(max_workers=EXECUTOR_POOL_SIZE)
self.loop.set_default_executor(self.executor)
self.loop.set_exception_handler(self._async_exception_handler)
self._pending_tasks = []
self.bus = EventBus(self)
self.services = ServiceRegistry(self)
self.states = StateMachine(self.bus, self.loop)
self.config = Config() # type: Config
# This is a dictionary that any component can store any data on.
self.data = {}
self.state = CoreState.not_running
self.exit_code = None
@property
def is_running(self) -> bool:
"""Return if Home Assistant is running."""
return self.state in (CoreState.starting, CoreState.running)
def start(self) -> None:
"""Start home assistant."""
# Register the async start
self.loop.create_task(self.async_start())
# Run forever and catch keyboard interrupt
try:
# Block until stopped
_LOGGER.info("Starting Home Assistant core loop")
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.call_soon(self._async_stop_handler)
self.loop.run_forever()
finally:
self.loop.close()
@asyncio.coroutine
def async_start(self):
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
_LOGGER.info("Starting Home Assistant")
self.state = CoreState.starting
# Register the restart/stop event
self.services.async_register(
DOMAIN, SERVICE_HOMEASSISTANT_STOP, self._async_stop_handler)
self.services.async_register(
DOMAIN, SERVICE_HOMEASSISTANT_RESTART, self._async_restart_handler)
# Setup signal handling
if sys.platform != 'win32':
try:
self.loop.add_signal_handler(
signal.SIGTERM, self._async_stop_handler)
except ValueError:
_LOGGER.warning("Could not bind to SIGTERM")
try:
self.loop.add_signal_handler(
signal.SIGHUP, self._async_restart_handler)
except ValueError:
_LOGGER.warning("Could not bind to SIGHUP")
# pylint: disable=protected-access
self.loop._thread_ident = threading.get_ident()
_async_create_timer(self)
self.bus.async_fire(EVENT_HOMEASSISTANT_START)
self.state = CoreState.running
def add_job(self, target: Callable[..., None], *args: Any) -> None:
"""Add job to the executor pool.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call add_job with None")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def _async_add_job(self, target: Callable[..., None], *args: Any) -> None:
"""Add a job from within the eventloop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if asyncio.iscoroutine(target):
self.loop.create_task(target)
elif is_callback(target):
self.loop.call_soon(target, *args)
elif asyncio.iscoroutinefunction(target):
self.loop.create_task(target(*args))
else:
self.loop.run_in_executor(None, target, *args)
async_add_job = _async_add_job
@callback
def _async_add_job_tracking(self, target: Callable[..., None],
*args: Any) -> None:
"""Add a job from within the eventloop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
task = None
if asyncio.iscoroutine(target):
task = self.loop.create_task(target)
elif is_callback(target):
self.loop.call_soon(target, *args)
elif asyncio.iscoroutinefunction(target):
task = self.loop.create_task(target(*args))
else:
task = self.loop.run_in_executor(None, target, *args)
        # If a task was scheduled, track it so we can wait for it later.
if task is not None:
self._pending_tasks.append(task)
@callback
def async_track_tasks(self):
"""Track tasks so you can wait for all tasks to be done."""
self.async_add_job = self._async_add_job_tracking
@asyncio.coroutine
def async_stop_track_tasks(self):
"""Track tasks so you can wait for all tasks to be done."""
yield from self.async_block_till_done()
self.async_add_job = self._async_add_job
@callback
def async_run_job(self, target: Callable[..., None], *args: Any) -> None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if is_callback(target):
target(*args)
else:
self.async_add_job(target, *args)
def block_till_done(self) -> None:
"""Block till all pending work is done."""
run_coroutine_threadsafe(
self.async_block_till_done(), loop=self.loop).result()
@asyncio.coroutine
def async_block_till_done(self):
"""Block till all pending work is done."""
# To flush out any call_soon_threadsafe
yield from asyncio.sleep(0, loop=self.loop)
while self._pending_tasks:
pending = [task for task in self._pending_tasks
if not task.done()]
self._pending_tasks.clear()
if len(pending) > 0:
yield from asyncio.wait(pending, loop=self.loop)
else:
yield from asyncio.sleep(0, loop=self.loop)
def stop(self) -> None:
"""Stop Home Assistant and shuts down all threads."""
run_coroutine_threadsafe(self.async_stop(), self.loop)
@asyncio.coroutine
def async_stop(self) -> None:
"""Stop Home Assistant and shuts down all threads.
This method is a coroutine.
"""
import homeassistant.helpers.aiohttp_client as aiohttp_client
self.state = CoreState.stopping
self.async_track_tasks()
self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
yield from self.async_block_till_done()
self.executor.shutdown()
self.state = CoreState.not_running
# cleanup connector pool from aiohttp
yield from aiohttp_client.async_cleanup_websession(self)
# cleanup async layer from python logging
if self.data.get(DATA_ASYNCHANDLER):
handler = self.data.pop(DATA_ASYNCHANDLER)
logging.getLogger('').removeHandler(handler)
yield from handler.async_close(blocking=True)
self.loop.stop()
# pylint: disable=no-self-use
@callback
def _async_exception_handler(self, loop, context):
"""Handle all exception inside the core loop."""
kwargs = {}
exception = context.get('exception')
if exception:
# Do not report on shutting down exceptions.
if isinstance(exception, ShuttingDown):
return
kwargs['exc_info'] = (type(exception), exception,
exception.__traceback__)
_LOGGER.error("Error doing job: %s", context['message'], **kwargs)
@callback
def _async_stop_handler(self, *args):
"""Stop Home Assistant."""
self.exit_code = 0
self.loop.create_task(self.async_stop())
@callback
def _async_restart_handler(self, *args):
"""Restart Home Assistant."""
self.exit_code = RESTART_EXIT_CODE
self.loop.create_task(self.async_stop())
class EventOrigin(enum.Enum):
"""Represent the origin of an event."""
local = 'LOCAL'
remote = 'REMOTE'
def __str__(self):
"""Return the event."""
return self.value
class Event(object):
"""Represents an event within the Bus."""
__slots__ = ['event_type', 'data', 'origin', 'time_fired']
def __init__(self, event_type, data=None, origin=EventOrigin.local,
time_fired=None):
"""Initialize a new event."""
self.event_type = event_type
self.data = data or {}
self.origin = origin
self.time_fired = time_fired or dt_util.utcnow()
def as_dict(self):
"""Create a dict representation of this Event.
Async friendly.
"""
return {
'event_type': self.event_type,
'data': dict(self.data),
'origin': str(self.origin),
'time_fired': self.time_fired,
}
def __repr__(self):
"""Return the representation."""
# pylint: disable=maybe-no-member
if self.data:
return "<Event {}[{}]: {}>".format(
self.event_type, str(self.origin)[0],
util.repr_helper(self.data))
else:
return "<Event {}[{}]>".format(self.event_type,
str(self.origin)[0])
def __eq__(self, other):
"""Return the comparison."""
return (self.__class__ == other.__class__ and
self.event_type == other.event_type and
self.data == other.data and
self.origin == other.origin and
self.time_fired == other.time_fired)
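# Illustrative sketch only: constructing an Event and reading its dict form
# (EVENT_STATE_CHANGED is imported at the top of this module).
def _event_example():
    """Sketch: a locally fired event and its as_dict() representation."""
    event = Event(EVENT_STATE_CHANGED, {'entity_id': 'light.kitchen'})
    data = event.as_dict()
    assert data['event_type'] == EVENT_STATE_CHANGED
    assert data['data'] == {'entity_id': 'light.kitchen'}
    assert data['origin'] == 'LOCAL'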
class EventBus(object):
"""Allows firing of and listening for events."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new event bus."""
self._listeners = {}
self._hass = hass
@callback
def async_listeners(self):
"""Dict with events and the number of listeners.
This method must be run in the event loop.
"""
return {key: len(self._listeners[key])
for key in self._listeners}
@property
def listeners(self):
"""Dict with events and the number of listeners."""
return run_callback_threadsafe(
self._hass.loop, self.async_listeners
).result()
def fire(self, event_type: str, event_data=None, origin=EventOrigin.local):
"""Fire an event."""
self._hass.loop.call_soon_threadsafe(
self.async_fire, event_type, event_data, origin)
@callback
def async_fire(self, event_type: str, event_data=None,
origin=EventOrigin.local, wait=False):
"""Fire an event.
This method must be run in the event loop.
"""
if event_type != EVENT_HOMEASSISTANT_STOP and \
self._hass.state == CoreState.stopping:
raise ShuttingDown("Home Assistant is shutting down")
        # Copy the list of the current listeners because some listeners
        # remove themselves as a listener while being executed, which
        # would confuse the iterator.
get = self._listeners.get
listeners = get(MATCH_ALL, []) + get(event_type, [])
event = Event(event_type, event_data, origin)
if event_type != EVENT_TIME_CHANGED:
_LOGGER.info("Bus:Handling %s", event)
if not listeners:
return
for func in listeners:
self._hass.async_add_job(func, event)
def listen(self, event_type, listener):
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen, event_type, listener).result()
def remove_listener():
"""Remove the listener."""
run_callback_threadsafe(
self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen(self, event_type, listener):
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
This method must be run in the event loop.
"""
if event_type in self._listeners:
self._listeners[event_type].append(listener)
else:
self._listeners[event_type] = [listener]
def remove_listener():
"""Remove the listener."""
self._async_remove_listener(event_type, listener)
return remove_listener
def listen_once(self, event_type, listener):
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen_once, event_type, listener,
).result()
def remove_listener():
"""Remove the listener."""
run_callback_threadsafe(
self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen_once(self, event_type, listener):
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
@callback
def onetime_listener(event):
"""Remove listener from eventbus and then fire listener."""
if hasattr(onetime_listener, 'run'):
return
            # Set a marker attribute so that we will never run twice.
            # Because the event bus loop might have async_fire queued multiple
            # times, it's possible this listener may already be lined up
            # multiple times as well.
            # This makes sure the second invocation does nothing.
setattr(onetime_listener, 'run', True)
self._async_remove_listener(event_type, onetime_listener)
self._hass.async_run_job(listener, event)
return self.async_listen(event_type, onetime_listener)
@callback
def _async_remove_listener(self, event_type, listener):
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
            # KeyError if the event_type list did not exist
            # ValueError if the listener did not exist within event_type
_LOGGER.warning("Unable to remove unknown listener %s", listener)
class State(object):
"""Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
"""
__slots__ = ['entity_id', 'state', 'attributes',
'last_changed', 'last_updated']
def __init__(self, entity_id, state, attributes=None, last_changed=None,
last_updated=None):
"""Initialize a new state."""
if not valid_entity_id(entity_id):
raise InvalidEntityFormatError((
"Invalid entity id encountered: {}. "
"Format should be <domain>.<object_id>").format(entity_id))
self.entity_id = entity_id.lower()
self.state = str(state)
self.attributes = MappingProxyType(attributes or {})
self.last_updated = last_updated or dt_util.utcnow()
self.last_changed = last_changed or self.last_updated
@property
def domain(self):
"""Domain of this state."""
return split_entity_id(self.entity_id)[0]
@property
def object_id(self):
"""Object id of this state."""
return split_entity_id(self.entity_id)[1]
@property
def name(self):
"""Name of this state."""
return (
self.attributes.get(ATTR_FRIENDLY_NAME) or
self.object_id.replace('_', ' '))
def as_dict(self):
"""Return a dict representation of the State.
Async friendly.
To be used for JSON serialization.
Ensures: state == State.from_dict(state.as_dict())
"""
return {'entity_id': self.entity_id,
'state': self.state,
'attributes': dict(self.attributes),
'last_changed': self.last_changed,
'last_updated': self.last_updated}
@classmethod
def from_dict(cls, json_dict):
"""Initialize a state from a dict.
Async friendly.
        Ensures: state == State.from_dict(state.as_dict())
"""
if not (json_dict and 'entity_id' in json_dict and
'state' in json_dict):
return None
last_changed = json_dict.get('last_changed')
if isinstance(last_changed, str):
last_changed = dt_util.parse_datetime(last_changed)
last_updated = json_dict.get('last_updated')
if isinstance(last_updated, str):
last_updated = dt_util.parse_datetime(last_updated)
return cls(json_dict['entity_id'], json_dict['state'],
json_dict.get('attributes'), last_changed, last_updated)
def __eq__(self, other):
"""Return the comparison of the state."""
return (self.__class__ == other.__class__ and
self.entity_id == other.entity_id and
self.state == other.state and
self.attributes == other.attributes)
def __repr__(self):
"""Return the representation of the states."""
attr = "; {}".format(util.repr_helper(self.attributes)) \
if self.attributes else ""
return "<state {}={}{} @ {}>".format(
self.entity_id, self.state, attr,
dt_util.as_local(self.last_changed).isoformat())
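# Illustrative sketch only: State validates the "<domain>.<object_id>" format,
# lower-cases the entity id, and round-trips through as_dict()/from_dict().
def _state_example():
    """Sketch: building a State and exercising its helper properties."""
    state = State('light.Kitchen', 'on', {ATTR_FRIENDLY_NAME: 'Kitchen Light'})
    assert state.entity_id == 'light.kitchen'
    assert state.domain == 'light'
    assert state.object_id == 'kitchen'
    assert state.name == 'Kitchen Light'
    assert State.from_dict(state.as_dict()) == state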
class StateMachine(object):
"""Helper class that tracks the state of different entities."""
def __init__(self, bus, loop):
"""Initialize state machine."""
self._states = {}
self._bus = bus
self._loop = loop
def entity_ids(self, domain_filter=None):
"""List of entity ids that are being tracked."""
future = run_callback_threadsafe(
self._loop, self.async_entity_ids, domain_filter
)
return future.result()
@callback
def async_entity_ids(self, domain_filter=None):
"""List of entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states.keys())
domain_filter = domain_filter.lower()
return [state.entity_id for state in self._states.values()
if state.domain == domain_filter]
def all(self):
"""Create a list of all states."""
return run_callback_threadsafe(self._loop, self.async_all).result()
@callback
def async_all(self):
"""Create a list of all states.
This method must be run in the event loop.
"""
return list(self._states.values())
def get(self, entity_id):
"""Retrieve state of entity_id or None if not found.
Async friendly.
"""
return self._states.get(entity_id.lower())
def is_state(self, entity_id, state):
"""Test if entity exists and is specified state.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj and state_obj.state == state
def is_state_attr(self, entity_id, name, value):
"""Test if entity exists and has a state attribute set to value.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj and state_obj.attributes.get(name, None) == value
def remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return run_callback_threadsafe(
self._loop, self.async_remove, entity_id).result()
@callback
def async_remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
old_state = self._states.pop(entity_id, None)
if old_state is None:
return False
event_data = {
'entity_id': entity_id,
'old_state': old_state,
'new_state': None,
}
self._bus.async_fire(EVENT_STATE_CHANGED, event_data)
return True
def set(self, entity_id, new_state, attributes=None, force_update=False):
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
run_callback_threadsafe(
self._loop,
self.async_set, entity_id, new_state, attributes, force_update,
).result()
@callback
def async_set(self, entity_id, new_state, attributes=None,
force_update=False):
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
new_state = str(new_state)
attributes = attributes or {}
old_state = self._states.get(entity_id)
is_existing = old_state is not None
same_state = (is_existing and old_state.state == new_state and
not force_update)
same_attr = is_existing and old_state.attributes == attributes
if same_state and same_attr:
return
# If state did not exist or is different, set it
last_changed = old_state.last_changed if same_state else None
state = State(entity_id, new_state, attributes, last_changed)
self._states[entity_id] = state
event_data = {
'entity_id': entity_id,
'old_state': old_state,
'new_state': state,
}
self._bus.async_fire(EVENT_STATE_CHANGED, event_data)
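# Sketch of the last_changed semantics described above (entity id and values
# are hypothetical):
#
#   states.async_set('sensor.temp', '21', {'unit': 'C'})
#   states.async_set('sensor.temp', '21', {'unit': 'F'})  # attributes only: last_changed is kept
#   states.async_set('sensor.temp', '22')                 # new state: last_changed is reset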
class Service(object):
"""Represents a callable service."""
__slots__ = ['func', 'description', 'fields', 'schema',
'is_callback', 'is_coroutinefunction']
def __init__(self, func, description, fields, schema):
"""Initialize a service."""
self.func = func
self.description = description or ''
self.fields = fields or {}
self.schema = schema
self.is_callback = is_callback(func)
self.is_coroutinefunction = asyncio.iscoroutinefunction(func)
def as_dict(self):
"""Return dictionary representation of this service."""
return {
'description': self.description,
'fields': self.fields,
}
class ServiceCall(object):
"""Represents a call to a service."""
__slots__ = ['domain', 'service', 'data', 'call_id']
def __init__(self, domain, service, data=None, call_id=None):
"""Initialize a service call."""
self.domain = domain.lower()
self.service = service.lower()
self.data = MappingProxyType(data or {})
self.call_id = call_id
def __repr__(self):
"""Return the represenation of the service."""
if self.data:
return "<ServiceCall {}.{}: {}>".format(
self.domain, self.service, util.repr_helper(self.data))
else:
return "<ServiceCall {}.{}>".format(self.domain, self.service)
class ServiceRegistry(object):
"""Offers services over the eventbus."""
def __init__(self, hass):
"""Initialize a service registry."""
self._services = {}
self._hass = hass
self._cur_id = 0
self._async_unsub_call_event = None
@property
def services(self):
"""Dict with per domain a list of available services."""
return run_callback_threadsafe(
self._hass.loop, self.async_services,
).result()
@callback
def async_services(self):
"""Dict with per domain a list of available services.
This method must be run in the event loop.
"""
return {domain: {key: value.as_dict() for key, value
in self._services[domain].items()}
for domain in self._services}
def has_service(self, domain, service):
"""Test if specified service exists.
Async friendly.
"""
return service.lower() in self._services.get(domain.lower(), [])
def register(self, domain, service, service_func, description=None,
schema=None):
"""
Register a service.
Description is a dict containing key 'description' to describe
the service and a key 'fields' to describe the fields.
Schema is called to coerce and validate the service data.
"""
run_callback_threadsafe(
self._hass.loop,
self.async_register, domain, service, service_func, description,
schema
).result()
@callback
def async_register(self, domain, service, service_func, description=None,
schema=None):
"""
Register a service.
Description is a dict containing key 'description' to describe
the service and a key 'fields' to describe the fields.
Schema is called to coerce and validate the service data.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
description = description or {}
service_obj = Service(service_func, description.get('description'),
description.get('fields', {}), schema)
if domain in self._services:
self._services[domain][service] = service_obj
else:
self._services[domain] = {service: service_obj}
if self._async_unsub_call_event is None:
self._async_unsub_call_event = self._hass.bus.async_listen(
EVENT_CALL_SERVICE, self._event_to_service_call)
self._hass.bus.async_fire(
EVENT_SERVICE_REGISTERED,
{ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def call(self, domain, service, service_data=None, blocking=False):
"""
Call a service.
Specify blocking=True to wait until the service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
If blocking=True, returns a boolean indicating whether the service
executed successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
"""
return run_coroutine_threadsafe(
self.async_call(domain, service, service_data, blocking),
self._hass.loop
).result()
@asyncio.coroutine
def async_call(self, domain, service, service_data=None, blocking=False):
"""
Call a service.
Specify blocking=True to wait until the service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
If blocking=True, returns a boolean indicating whether the service
executed successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
This method is a coroutine.
"""
call_id = self._generate_unique_id()
event_data = {
ATTR_DOMAIN: domain.lower(),
ATTR_SERVICE: service.lower(),
ATTR_SERVICE_DATA: service_data,
ATTR_SERVICE_CALL_ID: call_id,
}
if blocking:
fut = asyncio.Future(loop=self._hass.loop)
@callback
def service_executed(event):
"""Callback method that is called when service is executed."""
if event.data[ATTR_SERVICE_CALL_ID] == call_id:
fut.set_result(True)
unsub = self._hass.bus.async_listen(
EVENT_SERVICE_EXECUTED, service_executed)
self._hass.bus.async_fire(EVENT_CALL_SERVICE, event_data)
if blocking:
done, _ = yield from asyncio.wait(
[fut], loop=self._hass.loop, timeout=SERVICE_CALL_LIMIT)
success = bool(done)
unsub()
return success
@asyncio.coroutine
def _event_to_service_call(self, event):
"""Callback for SERVICE_CALLED events from the event bus."""
service_data = event.data.get(ATTR_SERVICE_DATA) or {}
domain = event.data.get(ATTR_DOMAIN).lower()
service = event.data.get(ATTR_SERVICE).lower()
call_id = event.data.get(ATTR_SERVICE_CALL_ID)
if not self.has_service(domain, service):
if event.origin == EventOrigin.local:
_LOGGER.warning("Unable to find service %s/%s",
domain, service)
return
service_handler = self._services[domain][service]
def fire_service_executed():
"""Fire service executed event."""
if not call_id:
return
data = {ATTR_SERVICE_CALL_ID: call_id}
if (service_handler.is_coroutinefunction or
service_handler.is_callback):
self._hass.bus.async_fire(EVENT_SERVICE_EXECUTED, data)
else:
self._hass.bus.fire(EVENT_SERVICE_EXECUTED, data)
try:
if service_handler.schema:
service_data = service_handler.schema(service_data)
except vol.Invalid as ex:
_LOGGER.error("Invalid service data for %s.%s: %s",
domain, service, humanize_error(service_data, ex))
fire_service_executed()
return
service_call = ServiceCall(domain, service, service_data, call_id)
if service_handler.is_callback:
service_handler.func(service_call)
fire_service_executed()
elif service_handler.is_coroutinefunction:
yield from service_handler.func(service_call)
fire_service_executed()
else:
def execute_service():
"""Execute a service and fires a SERVICE_EXECUTED event."""
service_handler.func(service_call)
fire_service_executed()
self._hass.async_add_job(execute_service)
def _generate_unique_id(self):
"""Generate a unique service call id."""
self._cur_id += 1
return '{}-{}'.format(id(self), self._cur_id)
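# A minimal usage sketch for the registry above, assuming the conventional
# hass.services attribute; the domain, service name and handler are
# hypothetical:
#
#   def handle_turn_on(call):                  # receives a ServiceCall
#       entity_id = call.data.get('entity_id')
#       ...
#
#   hass.services.register('light', 'turn_on', handle_turn_on)
#   hass.services.call('light', 'turn_on',
#                      {'entity_id': 'light.kitchen'}, blocking=True)
#
# The call travels over the bus as an EVENT_CALL_SERVICE event, so any other
# registry listening on the same bus may also pick it up.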
class Config(object):
"""Configuration settings for Home Assistant."""
def __init__(self):
"""Initialize a new config object."""
self.latitude = None # type: Optional[float]
self.longitude = None # type: Optional[float]
self.elevation = None # type: Optional[int]
self.location_name = None # type: Optional[str]
self.time_zone = None # type: Optional[str]
self.units = METRIC_SYSTEM # type: UnitSystem
# If True, pip install is skipped for requirements on startup
self.skip_pip = False # type: bool
# List of loaded components
self.components = []
# Remote.API object pointing at local API
self.api = None
# Directory that holds the configuration
self.config_dir = None
def distance(self: object, lat: float, lon: float) -> float:
"""Calculate distance from Home Assistant.
Async friendly.
"""
return self.units.length(
location.distance(self.latitude, self.longitude, lat, lon), 'm')
def path(self, *path):
"""Generate path to the file within the config dir.
Async friendly.
"""
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
def as_dict(self):
"""Create a dict representation of this dict.
Async friendly.
"""
time_zone = self.time_zone or dt_util.UTC
return {
'latitude': self.latitude,
'longitude': self.longitude,
'elevation': self.elevation,
'unit_system': self.units.as_dict(),
'location_name': self.location_name,
'time_zone': time_zone.zone,
'components': self.components,
'config_dir': self.config_dir,
'version': __version__
}
def _async_create_timer(hass, interval=TIMER_INTERVAL):
"""Create a timer that will start on HOMEASSISTANT_START."""
stop_event = asyncio.Event(loop=hass.loop)
# The stop callback below runs inside the event loop and sets the stop event
@callback
def stop_timer(event):
"""Stop the timer."""
stop_event.set()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
@asyncio.coroutine
def timer(interval, stop_event):
"""Create an async timer."""
_LOGGER.info("Timer:starting")
last_fired_on_second = -1
calc_now = dt_util.utcnow
while not stop_event.is_set():
now = calc_now()
# The first check tests whether we are on a second that does not match
# the timer interval; the second tests whether we already fired during
# this interval.
if now.second % interval or \
now.second == last_fired_on_second:
# Sleep till it is the next time that we have to fire an event.
# Aim for halfway through the second that fits TIMER_INTERVAL.
# If TIMER_INTERVAL is 10 fire at .5, 10.5, 20.5, etc seconds.
# This will yield the best results because time.sleep() is not
# 100% accurate because of non-realtime OS's
slp_seconds = interval - now.second % interval + \
.5 - now.microsecond/1000000.0
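# Worked example of the formula above: with interval=10, now.second=7 and
# now.microsecond=250000, slp_seconds = 10 - 7 + 0.5 - 0.25 = 3.25, which
# wakes the timer at hh:mm:10.5, halfway through the target second.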
yield from asyncio.sleep(slp_seconds, loop=hass.loop)
now = calc_now()
last_fired_on_second = now.second
# Event might have been set while sleeping
if not stop_event.is_set():
try:
# Schedule the bus event
hass.loop.call_soon(
hass.bus.async_fire,
EVENT_TIME_CHANGED,
{ATTR_NOW: now}
)
except ShuttingDown:
# HA raises error if firing event after it has shut down
break
@asyncio.coroutine
def start_timer(event):
"""Start our async timer."""
hass.loop.create_task(timer(interval, stop_event))
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_timer)
|
|
#!/usr/bin/python
import errno
import getopt
import math
import os
import progress
import re
import select
import signal
import socket
import subprocess
import sys
import time
from xml.dom.minidom import parse,parseString
from xml.dom import DOMException
from pprint import pprint
from configtool import getConfigVal, setConfigVal
try:
import numpy
except:
sys.stderr.write("failed to import numpy\n")
#return number of cpus online
def cpuCount():
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except:
pass
try:
return os.sysconf("_SC_NPROCESSORS_ONLN")
except:
pass
try:
return int(os.environ["NUMBER_OF_PROCESSORS"])
except:
pass
try:
return int(os.environ["NUM_PROCESSORS"])
except:
sys.stderr.write("failed to get the number of processors\n")
return 1 # guess 1
def getmemorysize():
try:
return int(re.match("MemTotal: *([0-9]+) *kB", open("/proc/meminfo").read()).group(1))*1024
except:
sys.stderr.write("failed to get total memory\n")
return 8 * (1024**3) # guess 8gb
def setmemlimit(n = getmemorysize()):
try:
import resource
resource.setrlimit(resource.RLIMIT_AS, (n,n))
except:
sys.stderr.write("failed to set memory limit\n")
def parallelRunJobs(jobs):
class JobInfo:
def __init__(self, id, fn):
self.id=id
self.fn=fn
self.pid=None
self.fd=None
self.msg=""
self.rv=None
def __cmp__(this, that):
return this.id-that.id
def fileno(self):
return self.fd.fileno()
def forkrun(self):
self.fd, w = socket.socketpair()
self.pid = os.fork()
if self.pid == 0:
#child
progress.disable()
self.fd.close()
class Redir():
def __init__(self, fd):
self.fd=fd
def write(self, s):
self.fd.sendall(s)
sys.stdout = Redir(w)
try:
rv = self.fn()
except Exception, e:
#import traceback
#traceback.print_exc()
print "Exception:",e
rv = False
print exitval
if rv:
sys.exit(0)
else:
sys.exit(1)
else:
#parent
w.close()
self.fd.setblocking(0)
return self
def handleevent(self):
if self.pid is None:
return None
try:
m=self.fd.recv(1024)
if m is not None:
self.msg+=m
if self.msg.rfind(exitval) >= 0:
raise Exception("done")
except:
pid, self.rv = os.waitpid(self.pid, 0)
assert self.pid == pid
self.pid = None
self.fd.close()
self.fd = None
def kill(self):
if self.pid is not None:
os.kill(self.pid, signal.SIGKILL)
def addmsg(self, msg):
self.msg+=msg
def getmsg(self):
return self.msg.replace(exitval,"") \
.replace('\n',' ') \
.strip()
startline = progress.currentline()
NCPU=cpuCount()
exitval="!EXIT!"
maxprinted=[0]
jobs_pending = map(lambda id: JobInfo(id, jobs[id]), xrange(len(jobs)))
jobs_running = [] # JobInfo list
jobs_done = [] # JobInfo list
def mkstatus():
s="running jobs: "
failed=len(filter(lambda x: x.rv!=0, jobs_done))
complete=(len(jobs_done)-failed)
if complete>0:
s += "%d complete, "%complete
if failed>0:
s += "%d failed, "%failed
s += "%d running, "%len(jobs_running)
s += "%d pending"%len(jobs_pending)
return s
def updatestatus(fast=False):
progress.remaining(2*len(jobs_pending)+len(jobs_running))
if not fast:
for j in jobs_done[maxprinted[0]:]:
if j.id==maxprinted[0]:
print j.getmsg()
maxprinted[0]+=1
else:
break
progress.push()
progress.status(mkstatus)
updatestatus()
try:
while len(jobs_pending)>0 or len(jobs_running)>0:
#spawn new jobs
while len(jobs_pending)>0 and len(jobs_running)<NCPU:
jobs_running.append(jobs_pending.pop(0).forkrun())
updatestatus()
#wait for an event
rj, wj, xj = select.select(jobs_running, [], jobs_running)
#handle pending data
for j in rj:
j.handleevent()
for j in wj:
j.handleevent()
for j in xj:
j.handleevent()
#move completed jobs to jobs_done list
newdone=filter(lambda x: x.pid is None, jobs_running)
jobs_running = filter(lambda x: x.pid is not None, jobs_running)
jobs_done.extend(newdone)
jobs_done.sort()
updatestatus(True)
except KeyboardInterrupt:
for j in jobs_running:
j.kill()
j.addmsg("INTERRUPTED")
jobs_done.extend(jobs_running)
jobs_done.sort()
updatestatus()
raise
updatestatus()
progress.pop()
return jobs_done
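# Minimal usage sketch (the job function below is hypothetical); each job runs
# in a forked child and should return True on success:
#
#   def build_foo():
#     print "building foo"
#     return True
#
#   done = parallelRunJobs([build_foo, build_foo])
#   failures = len(filter(lambda j: j.rv != 0, done))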
def getscriptpath():
try:
import configtool
m=re.search('''from ['"](.*)['"]''', str(configtool))
return os.path.dirname(m.group(1))
except:
return os.path.abspath(os.path.dirname(sys.argv[0]))
def chdirToPetabricksRoot():
old = os.getcwd()
new = getscriptpath()
isCurDirOk = lambda: os.path.isfile("src/compiler/pbc.cpp")
if not isCurDirOk():
os.chdir(new)
if not isCurDirOk():
os.chdir(os.pardir)
if not isCurDirOk():
os.chdir(old)
raise Exception("This script should be run from petabricks root directory")
def compilePetabricks():
cmd=["make","-sqC","src","all"]
if subprocess.call(cmd) != 0:
cmd=["make", "-j%d"%cpuCount()]
p=subprocess.Popen(cmd)
rv=p.wait()
if rv!=0:
raise Exception("pbc compile failed")
return rv
return 0
def expandBenchmarkName(name, ext):
base=re.sub("[.]pbcc$","", name)
if ext:
name=base+ext
if name[0] != '/':
return "./examples/%s" % (name)
else:
return name
benchmarkToBin = lambda name: expandBenchmarkName(name, "")
benchmarkToSrc = lambda name: expandBenchmarkName(name, ".pbcc")
benchmarkToInfo = lambda name: expandBenchmarkName(name, ".info")
benchmarkToCfg = lambda name: expandBenchmarkName(name, ".cfg")
class InvalidBenchmarkNameException(Exception):
def __init__(self, name):
self.name=name
def __str__(self):
return "InvalidBenchmarkNameException(%s)" % self.name
def searchBenchmarkName(n):
for root, dirs, files in os.walk("./examples"):
if n in files or n + ".pbcc" in files:
return normalizeBenchmarkName("%s/%s"%(root,n), False)
raise InvalidBenchmarkNameException(n)
def normalizeBenchmarkName(orig, search=True):
n=re.sub("^[./]*examples[/]", "", orig);
n=re.sub("[.]pbcc$","", n)
if os.path.isfile(orig+".pbcc"):
orig = os.path.abspath(orig+".pbcc")
elif os.path.isfile(orig):
orig = os.path.abspath(orig)
else:
orig = None
if os.path.isfile(benchmarkToSrc(n)) or not search:
return n
else:
try:
return searchBenchmarkName(n)
except InvalidBenchmarkNameException:
if orig is not None:
return orig
raise
def compileBenchmarks(benchmarks):
NULL=open("/dev/null","w")
pbc="./src/pbc"
libdepends=[pbc, "./src/libpbmain.a", "./src/libpbruntime.a", "./src/libpbcommon.a"]
assert os.path.isfile(pbc)
benchmarkMaxLen=0
jobs_per_pbc=max(1, 2*cpuCount() / len(benchmarks))
def compileBenchmark(name):
print name.ljust(benchmarkMaxLen)
src=benchmarkToSrc(name)
bin=benchmarkToBin(name)
if not os.path.isfile(src):
print "invalid benchmark"
return False
srcModTime=max(os.path.getmtime(src), reduce(max, map(os.path.getmtime, libdepends)))
if os.path.isfile(bin) and os.path.getmtime(bin) > srcModTime:
print "compile SKIPPED"
return True
else:
if os.path.isfile(bin):
os.unlink(bin)
p = subprocess.Popen([pbc, '--jobs='+str(jobs_per_pbc), src], stdout=NULL, stderr=NULL)
status = p.wait()
if status == 0:
print "compile PASSED"
return True
else:
print "compile FAILED (rc=%d)"%status
return False
newjob = lambda name, fn: lambda: compileBenchmark(name) and fn()
mergejob = lambda oldfn, fn: lambda: oldfn() and fn()
jobs=[]
# build jobs list
jobsdata = dict()
for b in benchmarks:
if type(b) is type(()):
name, fn, postfn = b
else:
name, fn, postfn = b, lambda: True, lambda: True
benchmarkMaxLen=max(benchmarkMaxLen, len(name))
if not jobsdata.has_key(name):
jobsdata[name] = [newjob(name,fn), postfn]
jobs.append(name)
else:
jobsdata[name][0] = mergejob(jobsdata[name][0], fn)
jobs = map(lambda n: mergejob(*jobsdata[n]), jobs)
return parallelRunJobs(jobs)
def loadAndCompileBenchmarks(file, searchterms=[], extrafn=lambda b: True, postfn=lambda b: True):
chdirToPetabricksRoot()
compilePetabricks()
benchmarks=open(file)
stripcomment = re.compile("([^#]*)([#].*)?")
benchmarks=map(lambda x: stripcomment.match(x).group(1).strip(), benchmarks)
benchmarks=filter(lambda x: len(x)>0, benchmarks)
ws = re.compile("[ \t]+")
benchmarks=map(lambda x: ws.split(x), benchmarks)
if len(searchterms)>0:
benchmarks=filter(lambda b: any(s in b[0] for s in searchterms), benchmarks)
for b in benchmarks:
b[0]=normalizeBenchmarkName(b[0])
return compileBenchmarks(map(lambda x: (x[0], lambda: extrafn(x), lambda: postfn(x[0])), benchmarks)), benchmarks
def killSubprocess(p):
if p.poll() is None:
try:
p.kill() #requires python 2.6
except:
os.kill(p.pid, signal.SIGTERM)
def tryAorB(A, B):
def tryAorBinst(x):
try:
return A(x)
except:
return B(x)
return tryAorBinst
#attempt to convert to an int or float
tryIntFloat = tryAorB(int, tryAorB(float, lambda x: x))
class TimingRunTimeout(Exception):
def __str__(self):
return repr(self.value)
class TimingRunFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def goodwait(p):
'''
Python doesn't check if its system calls return EINTR, which is kind of
dumb, so we have to catch this here.
'''
rv=None
while True:
try:
rv=p.wait()
return rv
except OSError, e:
if e.errno != errno.EINTR:
raise
def xmlToDict(xml, tag, fn=tryIntFloat, idx=0):
try:
rslt = xml.getElementsByTagName(tag)[idx].attributes
attrs=dict()
for x in xrange(rslt.length):
attrs[str(rslt.item(x).name)]=fn(rslt.item(x).nodeValue)
return attrs
except Exception,e:
return None
NULL=open("/dev/null", "w")
def callAndWait(cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=NULL)
goodwait(p)
if p.returncode == -15:
raise TimingRunTimeout()
if p.returncode != 0:
raise TimingRunFailed(p.returncode)
return p
#parse timing results with a given time limit
def executeRun(cmd, returnTags=['timing', 'accuracy', 'outputhash'], retries=3):
p = callAndWait(cmd)
try:
xml = parse(p.stdout)
except Exception, e:
print 'program crash',e
if retries>1:
return executeRun(cmd, returnTags, retries-1)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=NULL)
goodwait(p)
print p.stdout.read()
sys.exit(99)
timing = xmlToDict(xml, "timing")
if timing['average'] > 2**31:
raise TimingRunTimeout()
if type(returnTags) is type(""):
return xmlToDict(xml, returnTags)
else:
return map(lambda t: xmlToDict(xml, t), returnTags)
def executeRaceRun(_cmd, configa, configb, retries=3):
cmd = _cmd + ['--config='+configa, '--race-with='+configb]
p = callAndWait(cmd)
try:
xml = parse(p.stdout)
except Exception, e:
print 'program crash',e
if retries>1:
return executeRaceRun(_cmd, configa, configb, retries-1)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=NULL)
goodwait(p)
print p.stdout.read()
sys.exit(99)
aresult = xmlToDict(xml, "testresult", tryIntFloat, 0)
bresult = xmlToDict(xml, "testresult", tryIntFloat, 1)
assert aresult['label']==0
assert bresult['label']==1
return aresult, bresult
#parse timing results with a given time limit
def executeTimingRun(prog, n, args=[], limit=None, returnTags='timing'):
cmd = [ prog, "--n=%d"%n, "--time" ]
cmd.extend(args);
if limit:
cmd.append("--max-sec=%f" % float(limit))
return executeRun(cmd, returnTags)
def collectTimingSamples(prog, n=100, step=100, maxTime=10.0, x=[], y=[], args=[], scaler=lambda x: x):
start=time.time()
left=maxTime
try:
while left>0:
ni = int(math.ceil(scaler(n)))
y.append(executeTimingRun(prog, ni, args=args, limit=int(left+1))['average'])
x.append(ni)
n+=step
left=start+maxTime-time.time()
except TimingRunTimeout:
if len(x)<1:
raise
return x,y
def binarySearchInverse(fx, y, thresh=0.001, min=0.0, max=1000000000):
y0=fx(min)
yn=fx(max)
assert y0<=yn
if y0 > y-thresh:
return min
if yn < y+thresh:
return max
guess=(min+max)/2.0
yguess=fx(guess)
#binary search
if abs(yguess-y) < thresh:
return guess
if yguess>y:
return binarySearchInverse(fx, y, thresh, min, guess)
else:
return binarySearchInverse(fx, y, thresh, guess, max)
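# Example: binarySearchInverse(lambda v: v**2, 100.0) converges to ~10.0; fx
# must be monotonically increasing over [min, max] for the search to be valid.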
#fit y = c1 * x**c2
def expFitRaw(x,y):
# shift to log scale
x=map(lambda z: math.log(z,2), x)
y=map(lambda z: math.log(z,2), y)
# and polyfit
c2,c1 = numpy.polyfit(x, y, 1)
c1=2**c1
return c1,c2
#fit y = c1 * x**c2
def expFit(x,y):
c1,c2 = expFitRaw(x,y)
return lambda x: c1*x**c2,\
lambda y: 2**(math.log(y/c1, 2)/c2), \
"%.10f * x^%.4f"%(c1,c2)
#fit y = p[0]*x**n + ... + p[n-1]*x + p[n]
#order is picked automatically based on expFit
def polyFit(x,y):
c1, order = expFitRaw(x,y)
p = numpy.polyfit(x, y, int(math.ceil(order)))
fx=lambda x: numpy.polyval(p,x)
invfx=lambda y: binarySearchInverse(fx, y)
return fx, invfx, repr(p)
def collectTimingSamples2(prog, maxTime=12.0, args=[]):
#make initial guess at order
x,y=collectTimingSamples(prog, 4, 1, maxTime, args=args, scaler=lambda x: 2**x)
return x,y
def testEstimation(x, y, fit, prog):
pf, pinv, pStr = fit(x,y)
print " ",pStr
print " est 10k", pf(10000) #, "actual=", executeTimingRun(prog,10000)['average']
print " est 1 sec", (pinv(1))
print " est 2 sec", (pinv(2))
print " est 3 sec", (pinv(3))
def inferGoodInputSizes(prog, desiredTimes, maxTime=5.0):
x,y=collectTimingSamples2(prog, maxTime)
efx, efy, estr = expFit(x,y)
#pfx, pfy, pstr = polyFit(x,y)
sizes=map(int, map(efy, desiredTimes))
return sizes
def getMakefileFlag(name):
r=re.compile("^"+name+"[ ]*[=][ ]*(.*)")
return r.match(filter(lambda l: r.match(l), open("src/Makefile"))[0]).group(1).strip()
getCXX = lambda: getMakefileFlag("CXX")
getCXXFLAGS = lambda: getMakefileFlag("CXXFLAGS")
def getTunables(tx, type):
return filter( lambda t: t.getAttribute("type")==type, tx.getElementsByTagName("tunable") )
getTunablesSequential=lambda tx: getTunables(tx, "system.cutoff.sequential")
getTunablesSplitSize=lambda tx: getTunables(tx, "system.cutoff.splitsize")
#return the name of the main transform reported by a benchmark binary
def mainname(bin):
run_command = [bin, "--name"]
p = subprocess.Popen(run_command, stdout=subprocess.PIPE, stderr=NULL)
goodwait(p)
lines = p.stdout.readlines()
return lines[-1].strip()
if __name__ == "__main__":
chdirToPetabricksRoot()
compilePetabricks()
compileBenchmarks(map(normalizeBenchmarkName, ["add", "multiply", "transpose"]))
print "Estimating input sizes"
inferGoodInputSizes("./examples/simple/add", [0.1,0.5,1.0], 2)
|
|
"""
Support for Songpal-enabled (Sony) media devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.songpal/
"""
import asyncio
import logging
from collections import OrderedDict
import voluptuous as vol
from homeassistant.components.media_player import (
DOMAIN, PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP, MediaPlayerDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_NAME, STATE_OFF, STATE_ON, EVENT_HOMEASSISTANT_STOP)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-songpal==0.0.9.1']
_LOGGER = logging.getLogger(__name__)
CONF_ENDPOINT = 'endpoint'
PARAM_NAME = 'name'
PARAM_VALUE = 'value'
PLATFORM = 'songpal'
SET_SOUND_SETTING = 'songpal_set_sound_setting'
SUPPORT_SONGPAL = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_SELECT_SOURCE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ENDPOINT): cv.string,
})
SET_SOUND_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(PARAM_NAME): cv.string,
vol.Required(PARAM_VALUE): cv.string,
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Songpal platform."""
from songpal import SongpalException
if PLATFORM not in hass.data:
hass.data[PLATFORM] = {}
if discovery_info is not None:
name = discovery_info["name"]
endpoint = discovery_info["properties"]["endpoint"]
_LOGGER.debug("Got autodiscovered %s - endpoint: %s", name, endpoint)
device = SongpalDevice(name, endpoint)
else:
name = config.get(CONF_NAME)
endpoint = config.get(CONF_ENDPOINT)
device = SongpalDevice(name, endpoint, poll=False)
if endpoint in hass.data[PLATFORM]:
_LOGGER.debug("The endpoint exists already, skipping setup.")
return
try:
await device.initialize()
except SongpalException as ex:
_LOGGER.error("Unable to get methods from songpal: %s", ex)
raise PlatformNotReady
hass.data[PLATFORM][endpoint] = device
async_add_entities([device], True)
async def async_service_handler(service):
"""Service handler."""
entity_id = service.data.get("entity_id", None)
params = {key: value for key, value in service.data.items()
if key != ATTR_ENTITY_ID}
for device in hass.data[PLATFORM].values():
if device.entity_id == entity_id or entity_id is None:
_LOGGER.debug("Calling %s (entity: %s) with params %s",
service, entity_id, params)
await device.async_set_sound_setting(
params[PARAM_NAME], params[PARAM_VALUE])
hass.services.async_register(
DOMAIN, SET_SOUND_SETTING, async_service_handler,
schema=SET_SOUND_SCHEMA)
class SongpalDevice(MediaPlayerDevice):
"""Class representing a Songpal device."""
def __init__(self, name, endpoint, poll=False):
"""Init."""
from songpal import Device
self._name = name
self._endpoint = endpoint
self._poll = poll
self.dev = Device(self._endpoint)
self._sysinfo = None
self._state = False
self._available = False
self._initialized = False
self._volume_control = None
self._volume_min = 0
self._volume_max = 1
self._volume = 0
self._is_muted = False
self._active_source = None
self._sources = {}
@property
def should_poll(self):
"""Return True if the device should be polled."""
return self._poll
async def initialize(self):
"""Initialize the device."""
await self.dev.get_supported_methods()
self._sysinfo = await self.dev.get_system_info()
async def async_activate_websocket(self):
"""Activate websocket for listening if wanted."""
_LOGGER.info("Activating websocket connection..")
from songpal import (VolumeChange, ContentChange,
PowerChange, ConnectChange)
async def _volume_changed(volume: VolumeChange):
_LOGGER.debug("Volume changed: %s", volume)
self._volume = volume.volume
self._is_muted = volume.mute
await self.async_update_ha_state()
async def _source_changed(content: ContentChange):
_LOGGER.debug("Source changed: %s", content)
if content.is_input:
self._active_source = self._sources[content.source]
_LOGGER.debug("New active source: %s", self._active_source)
await self.async_update_ha_state()
else:
_LOGGER.warning("Got non-handled content change: %s",
content)
async def _power_changed(power: PowerChange):
_LOGGER.debug("Power changed: %s", power)
self._state = power.status
await self.async_update_ha_state()
async def _try_reconnect(connect: ConnectChange):
_LOGGER.error("Got disconnected with %s, trying to reconnect.",
connect.exception)
self._available = False
self.dev.clear_notification_callbacks()
await self.async_update_ha_state()
# Try to reconnect forever, a successful reconnect will initialize
# the websocket connection again.
delay = 10
while not self._available:
_LOGGER.debug("Trying to reconnect in %s seconds", delay)
await asyncio.sleep(delay)
# We need to inform HA about the state in case we are coming
# back from a disconnected state.
await self.async_update_ha_state(force_refresh=True)
delay = min(2*delay, 300)
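# Worked example of the backoff above: successive reconnect attempts wait
# 10, 20, 40, 80, 160, 300, 300, ... seconds until the device is back.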
self.dev.on_notification(VolumeChange, _volume_changed)
self.dev.on_notification(ContentChange, _source_changed)
self.dev.on_notification(PowerChange, _power_changed)
self.dev.on_notification(ConnectChange, _try_reconnect)
async def listen_events():
await self.dev.listen_notifications()
async def handle_stop(event):
await self.dev.stop_listen_notifications()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, handle_stop)
self.hass.loop.create_task(listen_events())
@property
def name(self):
"""Return name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._sysinfo.macAddr
@property
def available(self):
"""Return availability of the device."""
return self._available
async def async_set_sound_setting(self, name, value):
"""Change a setting on the device."""
await self.dev.set_sound_settings(name, value)
async def async_update(self):
"""Fetch updates from the device."""
from songpal import SongpalException
try:
volumes = await self.dev.get_volume_information()
if not volumes:
_LOGGER.error("Got no volume controls, bailing out")
self._available = False
return
if len(volumes) > 1:
_LOGGER.debug(
"Got %s volume controls, using the first one", volumes)
volume = volumes[0]
_LOGGER.debug("Current volume: %s", volume)
self._volume_max = volume.maxVolume
self._volume_min = volume.minVolume
self._volume = volume.volume
self._volume_control = volume
self._is_muted = self._volume_control.is_muted
status = await self.dev.get_power()
self._state = status.status
_LOGGER.debug("Got state: %s", status)
inputs = await self.dev.get_inputs()
_LOGGER.debug("Got ins: %s", inputs)
self._sources = OrderedDict()
for input_ in inputs:
self._sources[input_.uri] = input_
if input_.active:
self._active_source = input_
_LOGGER.debug("Active source: %s", self._active_source)
self._available = True
# activate notifications if wanted
if not self._poll:
await self.hass.async_create_task(
self.async_activate_websocket())
except SongpalException as ex:
_LOGGER.error("Unable to update: %s", ex)
self._available = False
async def async_select_source(self, source):
"""Select source."""
for out in self._sources.values():
if out.title == source:
await out.activate()
return
_LOGGER.error("Unable to find output: %s", source)
@property
def source_list(self):
"""Return list of available sources."""
return [src.title for src in self._sources.values()]
@property
def state(self):
"""Return current state."""
if self._state:
return STATE_ON
return STATE_OFF
@property
def source(self):
"""Return currently active source."""
return self._active_source.title
@property
def volume_level(self):
"""Return volume level."""
volume = self._volume / self._volume_max
return volume
async def async_set_volume_level(self, volume):
"""Set volume level."""
volume = int(volume * self._volume_max)
_LOGGER.debug("Setting volume to %s", volume)
return await self._volume_control.set_volume(volume)
async def async_volume_up(self):
"""Set volume up."""
return await self._volume_control.set_volume("+1")
async def async_volume_down(self):
"""Set volume down."""
return await self._volume_control.set_volume("-1")
async def async_turn_on(self):
"""Turn the device on."""
return await self.dev.set_power(True)
async def async_turn_off(self):
"""Turn the device off."""
return await self.dev.set_power(False)
async def async_mute_volume(self, mute):
"""Mute or unmute the device."""
_LOGGER.debug("Set mute: %s", mute)
return await self._volume_control.set_mute(mute)
@property
def is_volume_muted(self):
"""Return whether the device is muted."""
return self._is_muted
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_SONGPAL
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper classes for managing issues."""
import re
def get_values_containing(target, expression):
regex = re.compile(expression, re.DOTALL | re.IGNORECASE)
return [value for value in target if regex.search(value)]
def get_values_matching(target, expression):
regex = re.compile(expression + r'\Z', re.DOTALL | re.IGNORECASE)
return [value for value in target if regex.match(value)]
def has_values_containing(target, expression):
return any(get_values_containing(target, expression))
def has_values_matching(target, expression):
return any(get_values_matching(target, expression))
def has_value(target, value):
for target_value in target:
if target_value.lower() == value.lower():
return True
return False
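# Example of the matching helpers above (label values are hypothetical):
#
#   labels = ['OS-Linux', 'Pri-1', 'Type-Bug']
#   get_values_matching(labels, 'OS-.*')    # -> ['OS-Linux'] (full match)
#   get_values_containing(labels, 'bug')    # -> ['Type-Bug'] (case-insensitive search)
#   has_value(labels, 'pri-1')              # -> True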
class ChangeList(list):
"""List that tracks changes for incremental updates."""
def __init__(self, seq=()):
super(ChangeList, self).__init__(seq)
self.added = set()
self.removed = set()
def append(self, p_object):
list.append(self, p_object)
if p_object in self.removed:
self.removed.remove(p_object)
else:
self.added.add(p_object)
def remove(self, value):
list.remove(self, value)
if value in self.added:
self.added.remove(value)
else:
self.removed.add(value)
def is_changed(self):
return (len(self.added) + len(self.removed)) > 0
def reset(self):
self.added.clear()
self.removed.clear()
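# Sketch of the incremental tracking above:
#
#   labels = ChangeList(['Pri-1'])
#   labels.append('Type-Bug')    # labels.added == {'Type-Bug'}
#   labels.remove('Pri-1')       # labels.removed == {'Pri-1'}
#   labels.is_changed()          # -> True
#   labels.reset()               # clears both sets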
class Issue(object):
"""Class representing a single issue."""
def __init__(self):
self.blocking = None
self.blocked_on = None
self.body = None
self.depends_on = None
self.cc = ChangeList()
self.closed = None
self.comment = ''
self.components = ChangeList()
self.created = None
self.id = 0
self.labels = ChangeList()
self.merged_into = None
self.merged_into_project = None
self.open = False
self.owner = None
self.reporter = None
self.status = None
self.stars = 0
self.summary = None
self.updated = None
self.dirty = False
self.send_email = True
self.new = True
self.itm = None
self.project_name = None
self.comments = None
self.comment_count = 0
self.first_comment = None
self.last_comment = None
self.changed = set()
def __getattribute__(self, item):
if item in ['body'] and not object.__getattribute__(self, item):
comment = self.get_first_comment()
self.__setattr__(item, comment.comment)
return object.__getattribute__(self, item)
def __setattr__(self, name, value):
self.__dict__[name] = value
if 'changed' in self.__dict__:
self.__dict__['changed'].add(name)
# Automatically set the project name if the itm is set.
if name == 'itm' and value and hasattr(value, 'project_name'):
self.__dict__['project_name'] = value.project_name
# Treat comments and dirty flag specially.
if name not in ('dirty', 'body', 'comments', 'itm', 'new', 'comment_count',
'first_comment', 'last_comment', 'project_name', 'changed',
'send_email'):
self.__dict__['dirty'] = True
if name == 'dirty' and not value:
self.labels.reset()
self.cc.reset()
if 'changed' in self.__dict__:
self.changed.clear()
def __getstate__(self):
"""Ensure that we don't pickle the itm.
This would raise an exception due to the way the apiary folks did their
information hiding (i.e. OAuth kicking us once again).
"""
result_dict = self.__dict__.copy()
del result_dict['itm']
return result_dict
def __setstate__(self, new_dict):
self.__dict__.update(new_dict)
self.itm = None
def _remove_tracked_value(self, target, value):
for existing_value in target:
if existing_value.lower() == value.lower():
target.remove(existing_value)
self.dirty = True
return
def add_component(self, component):
if not self.has_component(component):
self.components.append(component)
self.dirty = True
def remove_component(self, component):
if self.has_component(component):
self._remove_tracked_value(self.components, component)
self.add_component('-%s' % component)
def remove_components_by_prefix(self, prefix):
components = self.get_components_by_prefix(prefix)
for component in components:
self.remove_component(component)
def add_label(self, label):
if not self.has_label(label):
self.labels.append(label)
self.dirty = True
def remove_label(self, label):
if self.has_label(label):
self._remove_tracked_value(self.labels, label)
self.add_label('-%s' % label)
def remove_label_by_prefix(self, prefix):
labels = self.get_labels_by_prefix(prefix)
for label in labels:
self.remove_label(label)
def add_cc(self, cc):
if not self.has_cc(cc):
self.cc.append(cc)
self.dirty = True
def remove_cc(self, cc):
if self.has_cc(cc):
self.cc.remove(cc)
self.dirty = True
def get_components_by_prefix(self, prefix):
return get_values_matching(self.components, '%s.*' % prefix)
def get_components_containing(self, expression):
return get_values_containing(self.components, expression)
def get_components_matching(self, expression):
return get_values_matching(self.components, expression)
def has_components_containing(self, expression):
return has_values_containing(self.components, expression)
def has_components_matching(self, expression):
return has_values_matching(self.components, expression)
def has_component(self, value):
return has_value(self.components, value)
def get_labels_by_prefix(self, prefix):
return get_values_matching(self.labels, '%s.*' % prefix)
def get_labels_containing(self, expression):
return get_values_containing(self.labels, expression)
def get_labels_matching(self, expression):
return get_values_matching(self.labels, expression)
def has_label_by_prefix(self, prefix):
return has_values_containing(self.labels, '%s.*' % prefix)
def has_label_containing(self, expression):
return has_values_containing(self.labels, expression)
def has_label_matching(self, expression):
return has_values_matching(self.labels, expression)
def has_label(self, value):
return has_value(self.labels, value)
def has_cc(self, value):
return has_value(self.cc, value)
def has_comment_with_label(self, label):
for comment in self.get_comments():
if comment.has_label(label):
return True
return False
def has_comment_with_label_by_prefix(self, prefix):
for comment in self.get_comments():
if comment.get_labels_by_prefix(prefix):
return True
return False
def get_comments(self):
if not self.comments and self.itm:
self.comments = self.itm.get_comments(self.id)
self.comment_count = len(self.comments)
return self.comments
def get_first_comment(self):
if not self.first_comment and self.itm:
self.first_comment = self.itm.get_first_comment(self.id)
return self.first_comment
def get_last_comment(self):
if not self.last_comment and self.itm:
self.last_comment = self.itm.get_last_comment(self.id)
return self.last_comment
def get_comment_count(self):
if not self.comment_count and self.itm:
self.comment_count = self.itm.get_comment_count(self.id)
return self.comment_count
def save(self, send_email=None):
if self.itm:
self.itm.save(self, send_email)
def refresh(self):
if self.itm:
self.comments = None
self.last_comment = None
self.comment_count = 0
self.itm.refresh(self)
return self
|
|
import os
import json
import requests
from flask import Flask,Blueprint,session, redirect, render_template, g, url_for, request
from datetime import datetime
from HitchHike.config import GOOGLE_API_KEY
from flask_login import login_required, current_user
from flask_socketio import SocketIO, send, emit, join_room, leave_room, rooms
from HitchHike.welcome import socketio
from HitchHike.User.models import CarDriver, HitchHiker, Vehicle
from .models import AvailableCar, Ride
from HitchHike.googleapi import GoogleApi
dashboard=Blueprint("dashboard",__name__,template_folder="../template/dashboard",static_folder='../static')
GOOGLE_GEOCODE_URL = 'https://maps.googleapis.com/maps/api/geocode/json?place_id={0}&key={1}'
def reverse_geo_code(place_id):
tu = (place_id, GOOGLE_API_KEY)
location = requests.get(GOOGLE_GEOCODE_URL.format(*tu))
lat = str(location.json()['results'][0]['geometry']['location']['lat'])
lng = str(location.json()['results'][0]['geometry']['location']['lng'])
return [lat, lng]
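# Minimal sketch of the helper above (assumes a valid GOOGLE_API_KEY and a
# real Google place_id string):
#
#   lat, lng = reverse_geo_code(some_place_id)   # both are returned as strings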
@dashboard.route('/driver',methods=['GET'])
@login_required
def dash_driver():
return render_template('dashdriver/index.html',map_key=GOOGLE_API_KEY)
# else:
# return redirect(url_for('user.login'))
@dashboard.route('/hitchhiker',methods=['GET'])
@login_required
def dash_user():
return render_template('dashhiker/index.html',map_key=GOOGLE_API_KEY)
@dashboard.route('/profile')
@login_required
def profile():
return render_template('dashboard/profile.html')
# return redirect(url_for('user.login'))
@dashboard.route('/postride/', methods=['POST'])
@login_required
def post_ride():
# print "inPOST"
data = json.loads(request.data)
# print data
user = current_user.get_id()
car = AvailableCar.by_user(user)
if car is None:
available = AvailableCar()
available.owner = user
available.start = data['orig']
available.end = data['dest']
available.save()
return json.dumps({'status':'OK'})
else:
car.update(data['orig'], data['dest'])
return json.dumps({'status':'OK'})
@socketio.on('reqreceive')
def msgreceive(msg):
print
print "origin" , msg['orig']
print "dest" , msg['dest']
print
msg['eid'] = session.get('user_id',None)
# send(msg, broadcast=True)
cars=AvailableCar.all();
for i in cars:
# origin1 = requests.get('https://maps.googleapis.com/maps/api/geocode/json?place_id='+i.start+'&key='+GOOGLE_API_KEY)
# origin2 = requests.get('https://maps.googleapis.com/maps/api/geocode/json?place_id='+msg['orig']+'&key='+GOOGLE_API_KEY)
# origin1lat = str(origin1.json()['results'][0]['geometry']['location']['lat'])
# origin1lng = str(origin1.json()['results'][0]['geometry']['location']['lng'])
# origin2lat = str(origin2.json()['results'][0]['geometry']['location']['lat'])
# origin2lng = str(origin2.json()['results'][0]['geometry']['location']['lng'])
origin1 = reverse_geo_code(i.start)
origin2 = reverse_geo_code(msg['orig'])
origin1lat = origin1[0]
origin1lng = origin1[1]
origin2lat = origin2[0]
origin2lng = origin2[1]
url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins='+origin1lat+','+origin1lng+'&destinations='+origin2lat+','+origin2lng+'&key='+GOOGLE_API_KEY
print url
dist = requests.get(url)
print
distval = dist.json()['rows'][0]['elements'][0]['distance']['value']
print
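# 'distance.value' from the Distance Matrix response is in metres, so the
# check below notifies only drivers starting within 3 km of the hitchhiker.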
if ( distval <= 3000 ):
msg['owner'] = i.owner
emit('message', msg, room=i.owner)
print
@socketio.on('messageaccept')
def msgaccept(msg):
email = session.get('user_id',None)
driver = CarDriver.get_user(email)
print 'yolo'
msg['deid'] = email
msg['name'] = driver.name
msg['phone'] = driver.phone
vehicle = Vehicle.get_by_user(email)
msg['vehicle'] = vehicle.company + " " + vehicle.model
msg['regno'] = vehicle.reg_number
print msg
emit('message', msg, room=msg['eid'])
@socketio.on('joined')
def joined(message=None):
"""Sent by clients when they enter a room.
A status message is broadcast to all people in the room."""
room = session.get('user_id',None)
join_room(room)
print
# print "hurray in controller"
print session.get('user_id')
#emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)
print
print session
print
#print session.get('name') + ' has entered the room: ' + room
@dashboard.route('/driver/ride/accept',methods=['POST'])
@login_required
def accept_ride():
data = json.loads(request.data)
print data
ride = Ride()
ride.driver = data['owner']
ride.hitchhiker = data['eid']
ride.origin = data['orig']
ride.destination = data['dest']
ride.driver_origin = data['dorig']
ride.driver_destination = data['ddest']
ride.save()
return json.dumps({'status':'OK'})
@dashboard.route('/driver/ride/', methods=['GET'])
@login_required
def driver_ride():
user = current_user.get_id()
if CarDriver.get_user(user):
ride = Ride.by_user(user)
if ride is not None:
data = {}
hitch_origin_data = reverse_geo_code(ride.origin)
hitch_dest_data = reverse_geo_code(ride.destination)
driver_origin_data = reverse_geo_code(ride.driver_origin)
driver_dest_data = reverse_geo_code(ride.driver_destination)
data['hitch_orig_lat'] = hitch_origin_data[0]
data['hitch_orig_lng'] = hitch_origin_data[1]
data['hitch_dest_lat'] = hitch_dest_data[0]
data['hitch_dest_lng'] = hitch_dest_data[1]
data['driver_orig_lat'] = driver_origin_data[0]
data['driver_orig_lng'] = driver_origin_data[1]
data['driver_dest_lat'] = driver_dest_data[0]
data['driver_dest_lng'] = driver_dest_data[1]
return render_template('ride/driver.html', data = data,map_key=GOOGLE_API_KEY)
else:
return "No active rides currently."
else:
return "Error : Forbidden. \n You are not allowed to view this page."
@dashboard.route('/hitchhiker/ride/', methods=['GET'])
@login_required
def hitchhiker_ride():
user = current_user.get_id()
if HitchHiker.get_user(user):
ride = Ride.by_hitchhiker(user)
if ride is not None:
data = {}
origin_data = GoogleApi.reverse_geo_code(ride.origin)
dest_data = GoogleApi.reverse_geo_code(ride.destination)
data['orig_lat'] = origin_data[0]
data['orig_lng'] = origin_data[1]
data['dest_lat'] = dest_data[0]
data['dest_lng'] = dest_data[1]
return render_template('ride/hitchhiker.html', data = data,map_key=GOOGLE_API_KEY)
else:
return "No active rides currently"
else:
return "Error : Forbidden. \n You are not allowed to view this page."
@dashboard.route('/driver/ride/stop',methods=['GET', 'POST'])
def stop_ride():
user = current_user.get_id()
if CarDriver.get_user(user):
ride = Ride.by_user(user)
if ride:
summary = ride.stop()
return "fare :" + str(ride.fare)
else:
return "No active ride"
else:
return "Forbidden"
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from textwrap import dedent
import pytest
from pants.backend.codegen.tasks.protobuf_parse import (MESSAGE_PARSER, ProtobufParse, camelcase,
get_outer_class_name, update_type_list)
from pants.util.contextutil import temporary_dir
class ProtobufParseTest(unittest.TestCase):
def test_parse_for(self):
with temporary_dir() as workdir:
filename = 'temperatures.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write(dedent('''
package com.pants.examples.temperature;
message Temperature {
optional string unit = 1;
required int64 number = 2;
}
'''))
fd.close()
proto_parser = ProtobufParse(fd.name, filename)
proto_parser.parse()
self.assertEqual('com.pants.examples.temperature', proto_parser.package)
self.assertEqual(set(), proto_parser.enums)
self.assertEqual(set(['Temperature']), proto_parser.messages)
self.assertEqual(set(), proto_parser.services)
self.assertEqual('Temperatures', proto_parser.outer_class_name)
def test_whitespace(self):
with temporary_dir() as workdir:
filename = 'jack_spratt_no_whitespace.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write(dedent('''
package com.twitter.lean;
option java_multiple_files = true;
enum Jake { FOO=1;}
message joe_bob {}
'''))
fd.close()
proto_parse_no_whitespace = ProtobufParse(fd.name, filename)
proto_parse_no_whitespace.parse()
self.assertEqual('com.twitter.lean', proto_parse_no_whitespace.package)
self.assertEqual(set(['Jake']), proto_parse_no_whitespace.enums)
self.assertEqual(set(['joe_bob']), proto_parse_no_whitespace.messages)
self.assertEqual(set(), proto_parse_no_whitespace.services)
self.assertEqual('JackSprattNoWhitespace', proto_parse_no_whitespace.outer_class_name)
filename = 'jack_spratt.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write(dedent('''
package com.twitter.lean;
option java_multiple_files = true;
enum Jake { FOO=1;
}
message joe_bob {
}
'''))
fd.close()
proto_parse_with_whitespace = ProtobufParse(fd.name, filename)
proto_parse_with_whitespace.parse()
self.assertEqual('com.twitter.lean', proto_parse_with_whitespace.package)
self.assertEqual(set(['Jake']), proto_parse_with_whitespace.enums)
self.assertEqual(set(['joe_bob']), proto_parse_with_whitespace.messages)
self.assertEqual('JackSpratt',proto_parse_with_whitespace.outer_class_name)
def test_update_type_list(self):
match = MESSAGE_PARSER.match('message Temperature {')
expected_value = set()
expected_value.add('Temperature')
actual_value = set()
update_type_list(match, 0, actual_value)
self.assertEqual(expected_value, actual_value)
def test_get_outer_class_name(self):
self.assertEqual('Distances', get_outer_class_name('distances.java'))
def test_camelcase(self):
self.assertEqual('TestingOut', camelcase('testing_out'))
def test_filename(self):
with temporary_dir() as workdir:
filename = 'foobar/testfile.proto'
os.makedirs(os.path.join(workdir, 'foobar'))
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write(dedent('''
package com.pants.protos;
message Foo {
optional string name = 1;
}
'''))
fd.close()
proto_parse = ProtobufParse(fd.name, filename)
self.assertEquals('testfile', proto_parse.filename)
def test_extend(self):
with temporary_dir() as workdir:
filename = 'testextend.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write(dedent('''
package com.pants.protos;
extend Foo {
optional int32 bar = 126;
}
'''))
fd.close()
proto_parse = ProtobufParse(fd.name, filename)
proto_parse.parse()
self.assertEqual(set(['Foo']), proto_parse.extends)
# TODO(Eric Ayers) The following tests won't pass because the .proto parse is not reliable.
# https://github.com/pantsbuild/pants/issues/96
@pytest.mark.xfail
def test_inner_class_no_newline(self):
with temporary_dir() as workdir:
filename = 'inner_class_no_newline.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write(dedent('''
package com.pants.protos;
option java_multiple_files = true;
message Foo {
enum Bar { BAZ = 0; }
}
'''))
fd.close()
proto_parse = ProtobufParse(fd.name, filename)
proto_parse.parse()
self.assertEqual('com.pants.protos', proto_parse.package)
self.assertEqual(set(['Bar']), proto_parse.enums)
self.assertEqual(set(['Foo']), proto_parse.messages)
self.assertEqual(set(), proto_parse.services)
self.assertEqual('InnerClassNoNewline', proto_parse.outer_class_name)
@pytest.mark.xfail
def test_no_newline_at_all1(self):
with temporary_dir() as workdir:
filename = 'no_newline_at_all1.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write('package com.pants.protos; option java_multiple_files = true; message Foo {'
+ ' enum Bar { BAZ = 0; } } message FooBar { }')
fd.close()
proto_parse = ProtobufParse(fd.name, filename)
proto_parse.parse()
self.assertEqual('com.pants.protos', proto_parse.package)
self.assertEqual(set(['Bar']), proto_parse.enums)
self.assertEqual(set(['Foo', 'FooBar']), proto_parse.messages)
self.assertEqual(set(), proto_parse.services)
self.assertEqual('NoNewlineAtAll1', proto_parse.outer_class_name)
@pytest.mark.xfail
def test_no_newline_at_all2(self):
with temporary_dir() as workdir:
filename = 'no_newline_at_all2.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write('package com.pants.protos; message Foo {'
+ 'enum Bar { BAZ = 0; } } message FooBar { }')
fd.close()
proto_parse = ProtobufParse(fd.name, filename)
proto_parse.parse()
self.assertEqual('com.pants.protos', proto_parse.package)
self.assertEqual(set(['Bar']), proto_parse.enums)
self.assertEqual(set(['Foo', 'FooBar']), proto_parse.messages)
self.assertEqual(set(), proto_parse.services)
self.assertEqual('NoNewlineAtAll2', proto_parse.outer_class_name)
@pytest.mark.xfail
def test_no_newline_at_all3(self):
with temporary_dir() as workdir:
filename = 'no_newline_at_all3.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write('package com.pants.protos; option java_package = "com.example.foo.bar"; message Foo { }')
fd.close()
proto_parse = ProtobufParse(fd.name, filename)
proto_parse.parse()
self.assertEqual('com.example.foo.bar', proto_parse.package)
self.assertEqual(set(), proto_parse.enums)
self.assertEqual(set(['Foo',]), proto_parse.messages)
self.assertEqual(set(), proto_parse.services)
self.assertEqual('NoNewlineAtAll3', proto_parse.outer_class_name)
@pytest.mark.xfail
def test_crazy_whitespace(self):
with temporary_dir() as workdir:
filename = 'crazy_whitespace.proto'
with open(os.path.join(workdir, filename), 'w') as fd:
fd.write(dedent('''
package
com.pants.protos; option
java_multiple_files
= true; option java_package =
"com.example.foo.bar"; message
Foo
{
enum
Bar {
BAZ = 0; } } message
FooBar
{ }
'''))
fd.close()
proto_parse = ProtobufParse(fd.name, filename)
proto_parse.parse()
self.assertEqual('com.example.foo.bar', proto_parse.package)
self.assertEqual(set(['Bar']), proto_parse.enums)
self.assertEqual(set(['Foo', 'FooBar']), proto_parse.messages)
self.assertEqual(set(), proto_parse.services)
self.assertEqual('CrazyWhitespace', proto_parse.outer_class_name)
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
@patch("os.path.exists", new = MagicMock(return_value=True))
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
class TestZookeeperClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "ZOOKEEPER/3.4.5/package"
STACK_VERSION = "2.0.6"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
classname = "ZookeeperClient",
command = "configure",
config_file = "default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
owner = 'zookeeper',
content = InlineTemplate(self.getConfig()['configurations']['zookeeper-env']['content']),
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
owner = 'zookeeper',
content = Template('zoo.cfg.j2'),
group = 'hadoop',
mode = None,
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
owner = 'zookeeper',
content = Template('configuration.xsl.j2'),
group = 'hadoop',
mode = None,
)
self.assertResourceCalled('Directory', '/var/run/zookeeper',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/zookeeper',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/hadoop/zookeeper',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
cd_access='a',
mode = 0755,
)
self.assertResourceCalled('File',
'/etc/zookeeper/conf/log4j.properties',
content=InlineTemplate(self.getConfig()['configurations']['zookeeper-log4j']['content']),
mode=0644,
group='hadoop',
owner='zookeeper'
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
owner = 'zookeeper',
group = 'hadoop',
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
classname = "ZookeeperClient",
command = "configure",
config_file = "secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
owner = 'zookeeper',
content = InlineTemplate(self.getConfig()['configurations']['zookeeper-env']['content']),
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
owner = 'zookeeper',
content = Template('zoo.cfg.j2'),
group = 'hadoop',
mode = None,
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
owner = 'zookeeper',
content = Template('configuration.xsl.j2'),
group = 'hadoop',
mode = None,
)
self.assertResourceCalled('Directory', '/var/run/zookeeper',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/zookeeper',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/hadoop/zookeeper',
owner = 'zookeeper',
group = 'hadoop',
create_parents = True,
cd_access='a',
mode = 0755
)
self.assertResourceCalled('File',
'/etc/zookeeper/conf/log4j.properties',
content=InlineTemplate(self.getConfig()['configurations']['zookeeper-log4j']['content']),
mode=0644,
group='hadoop',
owner='zookeeper'
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper_client_jaas.conf',
owner = 'zookeeper',
content = Template('zookeeper_client_jaas.conf.j2'),
group = 'hadoop',
mode = None,
)
self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
owner = 'zookeeper',
group = 'hadoop',
)
self.assertNoMoreResources()
def test_pre_upgrade_restart(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
classname = "ZookeeperClient",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-client', version), sudo=True)
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_23(self, call_mock):
    call_mock.side_effect = [(0, None), (0, None)]
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-3242'
json_content['commandParams']['version'] = version
mocks_dict = {}
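    # mocks_dict collects the patched shell call/checked_call mocks so their
    # call arguments can be asserted after executeScript runs.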
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
classname = "ZookeeperClient",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-client', version), sudo=True)
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
self.assertNoMoreResources()
|
|
import pytest
from guardian.shortcuts import get_perms
from tests.algorithms_tests.factories import AlgorithmJobFactory
from tests.algorithms_tests.utils import TwoAlgorithms
from tests.components_tests.factories import ComponentInterfaceValueFactory
from tests.factories import GroupFactory, ImageFactory, UserFactory
from tests.utils import get_view_for_user
@pytest.mark.django_db
@pytest.mark.parametrize("reverse", [True, False])
def test_user_can_download_images(client, reverse):
alg_set = TwoAlgorithms()
j1_creator, j2_creator = UserFactory(), UserFactory()
alg1_job = AlgorithmJobFactory(
algorithm_image__algorithm=alg_set.alg1, creator=j1_creator
)
alg2_job = AlgorithmJobFactory(
algorithm_image__algorithm=alg_set.alg2, creator=j2_creator
)
alg1_job.viewer_groups.add(alg_set.alg1.editors_group)
alg2_job.viewer_groups.add(alg_set.alg2.editors_group)
iv1, iv2, iv3, iv4 = (
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
)
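    # Exercise both directions of the many-to-many relation between jobs and output
    # images: the image-side (reverse) managers and the job-side (forward) managers
    # should yield the same visibility.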
if reverse:
for im in [iv1, iv2, iv3, iv4]:
im.algorithms_jobs_as_output.add(alg1_job, alg2_job)
for im in [iv3, iv4]:
im.algorithms_jobs_as_output.remove(alg1_job, alg2_job)
for im in [iv1, iv2]:
im.algorithms_jobs_as_output.remove(alg2_job)
else:
# Test that adding images works
alg1_job.outputs.add(iv1, iv2, iv3, iv4)
# Test that removing images works
alg1_job.outputs.remove(iv3, iv4)
tests = (
(None, 200, []),
(alg_set.creator, 200, []),
(
alg_set.editor1,
200,
[
*[i.image.pk for i in alg1_job.inputs.all()],
iv1.image.pk,
iv2.image.pk,
],
),
(alg_set.user1, 200, []),
(
j1_creator,
200,
[
*[i.image.pk for i in alg1_job.inputs.all()],
iv1.image.pk,
iv2.image.pk,
],
),
(alg_set.editor2, 200, [i.image.pk for i in alg2_job.inputs.all()],),
(alg_set.user2, 200, []),
(j2_creator, 200, [i.image.pk for i in alg2_job.inputs.all()]),
(alg_set.u, 200, []),
)
for test in tests:
response = get_view_for_user(
viewname="api:image-list",
client=client,
user=test[0],
content_type="application/json",
)
assert response.status_code == test[1]
assert response.json()["count"] == len(test[2])
pks = {obj["pk"] for obj in response.json()["results"]}
assert {str(pk) for pk in test[2]} == pks
# Test clearing
if reverse:
iv1.algorithms_jobs_as_output.clear()
iv2.algorithms_jobs_as_output.clear()
else:
alg1_job.outputs.clear()
response = get_view_for_user(
viewname="api:image-list",
client=client,
user=j1_creator,
content_type="application/json",
)
assert response.status_code == 200
assert response.json()["count"] == 1
@pytest.mark.django_db
@pytest.mark.parametrize("reverse", [True, False])
def test_user_can_download_input_images(client, reverse):
alg_set = TwoAlgorithms()
j1_creator, j2_creator = UserFactory(), UserFactory()
alg1_job = AlgorithmJobFactory(
algorithm_image__algorithm=alg_set.alg1, creator=j1_creator
)
alg2_job = AlgorithmJobFactory(
algorithm_image__algorithm=alg_set.alg2, creator=j2_creator
)
alg1_job.viewer_groups.add(alg_set.alg1.editors_group)
alg2_job.viewer_groups.add(alg_set.alg2.editors_group)
iv1, iv2, iv3, iv4 = (
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
)
alg1_origin_input = [i.image.pk for i in alg1_job.inputs.all()]
alg2_origin_input = [i.image.pk for i in alg2_job.inputs.all()]
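    # Capture the jobs' pre-existing input image pks before mutating the inputs;
    # the expected visible image sets below include them.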
if reverse:
for iv in [iv1, iv2, iv3, iv4]:
iv.algorithms_jobs_as_input.add(alg1_job, alg2_job)
for iv in [iv3, iv4]:
iv.algorithms_jobs_as_input.remove(alg1_job, alg2_job)
for iv in [iv1, iv2]:
iv.algorithms_jobs_as_input.remove(alg2_job)
else:
# Test that adding images works
alg1_job.inputs.add(iv1, iv2, iv3, iv4)
# Test that removing images works
alg1_job.inputs.remove(iv3, iv4)
tests = (
(None, 200, []),
(alg_set.creator, 200, []),
(
alg_set.editor1,
200,
[*alg1_origin_input, iv1.image.pk, iv2.image.pk],
),
(alg_set.user1, 200, []),
(j1_creator, 200, [*alg1_origin_input, iv1.image.pk, iv2.image.pk],),
(alg_set.editor2, 200, alg2_origin_input),
(alg_set.user2, 200, []),
(j2_creator, 200, alg2_origin_input),
(alg_set.u, 200, []),
)
for test in tests:
response = get_view_for_user(
viewname="api:image-list",
client=client,
user=test[0],
content_type="application/json",
)
assert response.status_code == test[1]
assert response.json()["count"] == len(test[2])
pks = {obj["pk"] for obj in response.json()["results"]}
assert {str(pk) for pk in test[2]} == pks
# Test clearing
if reverse:
iv1.algorithms_jobs_as_input.clear()
iv2.algorithms_jobs_as_input.clear()
else:
alg1_job.inputs.clear()
response = get_view_for_user(
viewname="api:image-list",
client=client,
user=j1_creator,
content_type="application/json",
)
assert response.status_code == 200
if reverse:
assert response.json()["count"] == 1
else:
assert response.json()["count"] == 0
@pytest.mark.django_db
class TestAlgorithmJobViewersGroup:
def test_view_permissions_are_assigned(self):
job = AlgorithmJobFactory()
viewer_groups = {*job.viewer_groups.all()}
assert viewer_groups == {
job.viewers,
}
for group in viewer_groups:
assert "view_job" in get_perms(group, job)
@pytest.mark.parametrize("reverse", [True, False])
def test_group_addition(self, reverse):
job = AlgorithmJobFactory()
group = GroupFactory()
civ_in, civ_out = (
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
)
job.inputs.add(civ_in)
job.outputs.add(civ_out)
assert "view_job" not in get_perms(group, job)
assert "view_image" not in get_perms(group, civ_in.image)
assert "view_image" not in get_perms(group, civ_out.image)
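        # Adding the group, from either side of the relation, should grant view
        # permissions on the job and on its input and output images.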
if reverse:
group.job_set.add(job)
else:
job.viewer_groups.add(group)
assert "view_job" in get_perms(group, job)
assert "view_image" in get_perms(group, civ_in.image)
assert "view_image" in get_perms(group, civ_out.image)
@pytest.mark.parametrize("reverse", [True, False])
def test_group_removal(self, reverse):
job = AlgorithmJobFactory()
civ_in, civ_out = (
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
)
job.inputs.add(civ_in)
job.outputs.add(civ_out)
group = job.viewer_groups.first()
assert "view_job" in get_perms(group, job)
assert "view_image" in get_perms(group, civ_in.image)
assert "view_image" in get_perms(group, civ_out.image)
if reverse:
group.job_set.remove(job)
else:
job.viewer_groups.remove(group)
assert "view_job" not in get_perms(group, job)
assert "view_image" not in get_perms(group, civ_in.image)
assert "view_image" not in get_perms(group, civ_out.image)
@pytest.mark.parametrize("reverse", [True, False])
def test_group_clearing(self, reverse):
job = AlgorithmJobFactory()
civ_in, civ_out = (
ComponentInterfaceValueFactory(image=ImageFactory()),
ComponentInterfaceValueFactory(image=ImageFactory()),
)
job.inputs.add(civ_in)
job.outputs.add(civ_out)
groups = job.viewer_groups.all()
assert len(groups) > 0
for group in groups:
assert "view_job" in get_perms(group, job)
assert "view_image" in get_perms(group, civ_in.image)
assert "view_image" in get_perms(group, civ_out.image)
if reverse:
for group in groups:
group.job_set.clear()
else:
job.viewer_groups.clear()
for group in groups:
assert "view_job" not in get_perms(group, job)
assert "view_image" not in get_perms(group, civ_in.image)
assert "view_image" not in get_perms(group, civ_out.image)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _make_converter(tf_dtype):
def _converter(x):
if tf_dtype == dtypes.string:
    # In Python 3, np.str is unicode, while we always want bytes.
return np.asarray(x).astype("|S")
x = np.asarray(x).astype(tf_dtype.as_numpy_dtype)
if tf_dtype.is_complex:
# Add a non-zero imaginary component to x.
x -= 1j * x
return x
return _converter
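# Helper for constructing TensorArrays in the tests below; infer_shape defaults to
# False so a single array can hold elements of differing shapes.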
def _make_ta(size, name, dtype=dtypes.float32, infer_shape=False):
return tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name=name, size=size, infer_shape=infer_shape)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class TensorArrayTest(test.TestCase):
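  # The class decorators run these tests in both graph and eager modes and with
  # control flow v2; a small local cluster is created once in setUpClass and reset
  # in tearDownClass.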
@classmethod
def setUpClass(cls):
super(TensorArrayTest, cls).setUpClass()
cls._workers, _ = test.create_local_cluster(num_workers=3, num_ps=0)
@classmethod
def tearDownClass(cls):
super(TensorArrayTest, cls).tearDownClass()
session_lib.Session.reset(cls._workers[0].target)
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteRead(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0)
def _testTensorArrayWritePackMaybeLegacy(self):
self._testTensorArrayWritePack(dtypes.float32)
self._testTensorArrayWritePack(dtypes.float64)
self._testTensorArrayWritePack(dtypes.int32)
self._testTensorArrayWritePack(dtypes.int64)
self._testTensorArrayWritePack(dtypes.complex64)
self._testTensorArrayWritePack(dtypes.complex128)
self._testTensorArrayWritePack(dtypes.string)
def testTensorArrayWritePack(self):
self._testTensorArrayWritePackMaybeLegacy()
def testEmptyTensorArrayPack(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual([3, 0, 1], c0.shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0]]), c0)
@test_util.run_deprecated_v1
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(dtypes.float32)
self._testTensorArrayWriteConcat(dtypes.float64)
self._testTensorArrayWriteConcat(dtypes.int32)
self._testTensorArrayWriteConcat(dtypes.int64)
self._testTensorArrayWriteConcat(dtypes.complex64)
self._testTensorArrayWriteConcat(dtypes.complex128)
self._testTensorArrayWriteConcat(dtypes.string)
def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
element_shape=tensor_shape.TensorShape([1, 2]))
self.assertAllEqual([[0.0, 0.0]], self.evaluate(ta.read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros()
def _testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
self.assertAllEqual(
[[0.0, 0.0]], self.evaluate(ta.write(1, [[4.0, 5.0]]).read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros()
@test_util.run_v1_only("Uses placeholders")
def testSkipEagerTensorArrayReadUninitializedInferShapeFillsZeros(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
val = array_ops.placeholder(dtypes.float32)
self.assertAllEqual(
[[0.0, 0.0]], sess.run(ta.write(1, val).read(0), {val: [[4.0, 5.0]]}))
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
ta = _make_ta(3, "foo", dtype=tf_dtype)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
# Unpack a matrix into vectors
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
self._testTensorArrayUnpackRead(dtypes.float32)
self._testTensorArrayUnpackRead(dtypes.float64)
self._testTensorArrayUnpackRead(dtypes.int32)
self._testTensorArrayUnpackRead(dtypes.int64)
self._testTensorArrayUnpackRead(dtypes.complex64)
self._testTensorArrayUnpackRead(dtypes.complex128)
self._testTensorArrayUnpackRead(dtypes.string)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
# Split an empty vector
ta = _make_ta(3, "foo", dtype=tf_dtype)
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
@test_util.run_deprecated_v1
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(dtypes.float32)
self._testTensorArraySplitRead(dtypes.float64)
self._testTensorArraySplitRead(dtypes.int32)
self._testTensorArraySplitRead(dtypes.int64)
self._testTensorArraySplitRead(dtypes.complex64)
self._testTensorArraySplitRead(dtypes.complex128)
self._testTensorArraySplitRead(dtypes.string)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradGrad(self):
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.skipTest("Legacy TensorArray does not support double derivatives.")
with self.test_session(use_gpu=True) as session:
x = constant_op.constant(4.0)
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=1,
infer_shape=False)
w0 = ta.write(0, x)
r0 = w0.read(0)
y = r0 * r0
g1 = gradients_impl.gradients(ys=[y], xs=[x])
g2 = gradients_impl.gradients(ys=[g1], xs=[x])
self.assertAllEqual([2.0], session.run(g2))
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayDynamicWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
[r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
# Test writing the wrong datatype
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = ("Invalid data types; op elements string but list elements "
"float")
else:
error_msg = (
"TensorArray dtype is (float|float32) but Op is trying to write "
"dtype string")
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(0, "wrong_type_scalar").flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element -1 in a list with 3 elements."
else:
error_msg = "index -1"
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(-1, 3.0).flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element 3 in a list with 3 elements"
else:
error_msg = ("Tried to write to index 3 but array is not "
"resizeable and size is: 3")
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(3, 3.0).flow)
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype (only possible when constructing graphs).
if (not context.executing_eagerly() and
not control_flow_util.ENABLE_CONTROL_FLOW_V2):
r0_bad = gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype double."):
self.evaluate(r0_bad)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element -1 in a list with 3 elements."
else:
error_msg = "index -1"
# Test reading from a negative index, which is not allowed
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(-1))
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element 3 in a list with 3 elements."
else:
error_msg = "Tried to read from index 3 but array size is: 3"
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(3))
@test_util.disable_control_flow_v2("v2 allows multiple writes.")
@test_util.run_v1_only("v2 allows multiple writes.")
def testSkipEagerTensorArrayWriteMultipleFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
self.evaluate(w3.concat())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
# The exact error messages differ between eager execution and graph
# construction as the former bubbles up the error from array_op.concat.
error_msg = ("Incompatible ranks"
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly() else "shape")
with self.assertRaisesRegexp(errors.InvalidArgumentError, error_msg):
self.evaluate(w3.concat())
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(use_gpu=True):
in_eager_mode = context.executing_eagerly()
ta = _make_ta(3, "foo")
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
if in_eager_mode:
self.evaluate(ta.split([1.0, 2.0, 3.0], 1))
else:
lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
error_msg = ("Unused values in tensor. Length of tensor: 3 Values used: 1"
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not in_eager_mode else
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]")
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.split([1.0, 2.0, 3.0], [1]).flow)
ta = _make_ta(1, "baz")
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and not in_eager_mode:
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
self.evaluate(ta.split(1.0, [1]).flow)
else:
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"
):
self.evaluate(ta.split(1.0, [1]).flow)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2 or in_eager_mode:
ta = _make_ta(2, "buz")
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
self.evaluate(ta.split([1.0], [1]).flow)
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3,
dtype=dtypes.float32,
element_shape=tensor_shape.TensorShape([2, 3]))
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertAllEqual([None, None, 2, 3], read_value.shape.as_list())
# Writing with wrong shape should not work.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Could not write to TensorArray"):
fed_value = np.random.random([2, 3])
sess.run(read_value, feed_dict={value: fed_value})
# Writing with correct shape should work.
fed_value = np.random.random([4, 5, 2, 3])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3, dtype=dtypes.float32,
element_shape=None) # Note that element_shape is unknown
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertIsNone(read_value.shape.ndims)
# Write with some shape and check read value.
fed_value = np.random.random([4, 5, 7])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
def testMultiTensorArray(self):
with self.session(use_gpu=True):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
val = self.evaluate(r)
self.assertAllClose(9.0, val)
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c(-2.0), grad_vals[1])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.complex64, np.complex128):
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with ops.control_dependencies([p0, r0, s0]):
grad_r = gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]
]) # concat gradient
grad_vals = self.evaluate(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
@test_util.disable_control_flow_v2("v2 does not support clear_after_read.")
@test_util.run_v1_only("v2 does not support clear_after_read.")
def testTensorArrayReadTwice(self):
with self.session(use_gpu=True):
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unstack(value)
r0_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
with ops.control_dependencies([r0_readonce]):
self.evaluate(w_readonce.read(0))
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))
def _testTensorArrayGradientUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientSplitConcat(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2,
infer_shape=False)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],
grad_vals[0])
def _testTensorArrayGradientDynamicUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r1 = w.read(1)
      # Test combined gradients of read(0) and read(1)
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientDynamicUnpackRead(self):
self._testTensorArrayGradientDynamicUnpackRead()
def testCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
self.evaluate(ta.close())
def testSizeTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, self.evaluate(s))
def testWriteCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
self.evaluate(w1.close()) # Expected to run without problems
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.cached_session(use_gpu=True):
def func(v0, state0, var):
ta = tensor_array_ops.TensorArray(
dtype=dtype,
tensor_array_name="foo",
size=0 if dynamic_size else 3,
dynamic_size=dynamic_size)
time_0 = array_ops.identity(0)
def body(time, ta_t, state):
sliced = array_ops.slice(
v0, begin=array_ops.stack([time, 0]), size=[1, -1])
sliced = array_ops.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time + 1, ta_t, state)
(unused_0, h_final, unused_2) = control_flow_ops.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()),
parallel_iterations=3)
vout = h_final.stack()
return vout
v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
init_val = np.arange(100, 105, dtype=np_dtype)
var = variable_scope.get_variable(
"var",
shape=init_val.shape,
dtype=np_dtype,
initializer=init_ops.constant_initializer(init_val))
vout = func(v0, state0, var)
grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
if context.executing_eagerly():
grad_fn = backprop.gradients_function(func)
v0_grad, state0_grad, var_grad = grad_fn(v0, state0, var, dy=grad_val)
else:
v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
self.evaluate(variables.global_variables_initializer())
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
self.evaluate(
([state0, var, v0, vout, v0_grad, var_grad, state0_grad])))
just_v0_grad_t = self.evaluate(v0_grad)
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
state_per_time = np.array(
[state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :], grad_val[2, :]
])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=dtypes.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
@test_util.run_v1_only("b/117943489")
def testSkipEagerWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=dtypes.float32)
def testGradSerialTwoLoops(self):
with self.session(use_gpu=True):
def loop(x):
num_steps = 100
acc = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=num_steps,
clear_after_read=False,
element_shape=tensor_shape.scalar())
i = constant_op.constant(0, name="i")
c = lambda i, acc: i < 5
def b(i, acc):
x1 = control_flow_ops.cond(
math_ops.equal(i, 0), lambda: x,
lambda: math_ops.multiply(acc.read(i - 1), 2.0))
return i + 1, acc.write(i, x1)
i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
z = constant_op.constant(0.0)
def fn(i, acc):
return i + 1, acc.write(i, z)
_, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
[i1, acc1])
r = acc2.stack()
return r
x = constant_op.constant(2.0, name="x")
if context.executing_eagerly():
grad = backprop.gradients_function(loop)(x)[0]
else:
grad = gradients_impl.gradients(loop(x), [x])[0]
self.assertAllClose(31.0, self.evaluate(grad))
@test_util.run_deprecated_v1
def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.session(use_gpu=True) as session:
a = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(
0, name="read_a_0") + # a + b
ta.read(
1, name="read_b_0"))
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo"))
self.assertEqual("gradients",
self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo/bar"))
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_EnclosingScope(self):
self.assertEqual("foo/gradients:0",
self._grad_source_for_name("foo/gradients"))
self.assertEqual("foo/gradients_0:0",
self._grad_source_for_name("foo/gradients_0"))
self.assertEqual("foo/gradients",
self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual("foo/gradients_0",
self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual("foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual("foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
@test_util.run_deprecated_v1
def testSkipEagerWriteShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
@test_util.run_deprecated_v1
def testSkipEagerPartlyUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=6)
c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w1 = w0.write(1, c1)
r1 = w1.read(0)
self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
# Writing less specific shape (doesn't change type.)
c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
w2 = w1.write(2, c2)
r2 = w2.read(0)
self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
# Writing more specific shape in one dimension and less specific in
# another.
c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
w3 = w2.write(3, c3)
r3 = w3.read(0)
self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
# Writing partly defined shape using TensorArray.scatter.
c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
w4 = w3.scatter([4, 5], c4)
r4 = w4.read(0)
self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
# Writing fully defined shape using TensorArray.split.
c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
w5 = w4.split(c5, constant_op.constant([5, 5]))
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
# TensorArray v2 does not support clear_after_read.
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"
):
with ops.control_dependencies([r0]):
self.evaluate(w1.read(0))
r1 = w1.read(1)
self.assertAllEqual(c1.get_shape(), r1.shape)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
@test_util.run_v1_only("b/117943489")
def testUnpackShape(self):
self._testUnpackShape()
@test_util.run_deprecated_v1
def testSplitShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo1",
size=0,
dynamic_size=True,
infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
if context.executing_eagerly():
self.assertEqual((1, 2), r0.get_shape())
self.assertEqual((2, 2), w0.read(1).get_shape())
else:
self.assertEqual(r0.get_shape().ndims, None)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
tensor_shape.TensorShape(
ta1.handle.op.get_attr("element_shape")).ndims, None)
@test_util.run_deprecated_v1
def testSkipEagerWriteUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=True)
c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
      # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
@test_util.run_deprecated_v1
def testSkipEagerGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
@test_util.run_deprecated_v1
def testSkipEagerWriteButNotAllComponentsReadGrad(self):
with self.cached_session(use_gpu=True) as session:
x0 = constant_op.constant(5.0)
x1 = constant_op.constant(10.0)
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2).write(0, x0).write(1, x1)
r0 = ta.read(0)
      # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
grad_r0_x1 = gradients_impl.gradients(ys=[r0], xs=[x0, x1], grad_ys=[1.0])
grad_r0_x1_vals = session.run(grad_r0_x1)
self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0])
def _testTensorArrayUnpackDynamic(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.unstack(x)
w1 = w0.write(3, 4.0)
r = w1.stack()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayUnpackDynamic(self):
self._testTensorArrayUnpackDynamic()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArraySplitDynamic(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.split(x, [1, 1, 1])
w1 = w0.write(3, [4.0])
r = w1.concat()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
def _testTensorArrayEvalEmpty(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
v2_msg = ("Tried to stack elements of an empty list with "
"non-fully-defined element_shape")
v1_msg = (
"TensorArray has size zero, but element shape <unknown> is not "
"fully defined. Currently only static shapes are supported when "
"packing zero-size TensorArrays.")
with self.assertRaisesOpError(
v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
ta.stack().eval()
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
  # This test is ill-defined for eager mode: unpacking an empty tensor gives an
  # empty list, and there is no equivalent of "mark_used" in eager mode.
def _testTensorArrayEvalEmptyWithDefault(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
self.assertEqual(0, ta.size().eval())
# Don't actually perform the pack. This stores the static shape.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
else:
ta.unstack(array_ops.zeros([0, 3, 5])).mark_used()
packed = ta.stack()
concatenated = ta.concat()
self.assertAllEqual([0, 3, 5], self.evaluate(packed).shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], self.evaluate(concatenated).shape)
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayScatterReadAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
      # Test combined gradients + aggregation across the two reads
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayScatterPartialReadAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
      # Test gradients of a single (partial) read
grad = gradients_impl.gradients(
ys=[r0], xs=[value], grad_ys=[[2.0, 3.0]])[0]
read_val, grad_val = session.run([r0, grad])
self.assertAllEqual([1.0, -1.0], read_val)
self.assertAllEqual([[2.0, 3.0], [0.0, 0.0]], grad_val)
@test_util.run_v1_only("b/118890905")
def testTensorArrayWriteGatherAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
def func(values):
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
return g
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
g = func(values)
grad_ys = [[[2.0, 3.0], [4.0, 5.0]]]
      # Test combined gradients + aggregation of the gathered reads
if context.executing_eagerly():
g_vals = [g]
grad_vals = backprop.gradients_function(func)(
values, dy=constant_op.constant(grad_ys[0], dtype=dtypes.float32))
else:
grad = gradients_impl.gradients(ys=[g], xs=[values], grad_ys=grad_ys)
g_vals, grad_vals = session.run([[g], grad])
# Gradients for 8 of the 10 unread components are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWrite(self):
with ops.device("/job:worker/task:0/cpu:0"):
# this initial device will be ignored.
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
# the first write sets the op's device.
ta = ta.write(0, 1.0)
with ops.device("/job:worker/task:2/cpu:0"):
# subsequent writes do not modify the op's device.
ta = ta.write(1, 1.0)
# The gradient TA will sit on the same device as the forward TA.
ta_grad = ta.grad("grad")
flows = [ta.flow, ta_grad.flow]
# Similar tests for unpack and split
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.unstack([1.0, 2.0])
with ops.device("/job:worker/task:2/cpu:0"):
ta = ta.write(2, 3.0)
flows.append(ta.flow)
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.split([1.0, 2.0], [1, 1])
flows.append(ta.flow)
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(flows, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
elif "/host:CPU" not in d:
self.assertFalse(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayDisabledColocateWithFirstWriteCall(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, colocate_with_first_write_call=False)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: list(d.node_stats)
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:0/" in d and "CPU" in d: # Skip any GPU node stats
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
def testTensorArrayIdentity(self):
with self.session(use_gpu=True):
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
infer_shape=False)
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
v0 = variable_scope.get_variable(
"v0", shape=(), initializer=init_ops.zeros_initializer())
v1 = variable_scope.get_variable(
"v1", shape=(), initializer=init_ops.zeros_initializer())
with ops.control_dependencies([v0.assign_add(1)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, ta0.dtype)
self.assertEqual(dtypes.int32, ta1.dtype)
if context.executing_eagerly():
self.assertEqual(tensor_shape.scalar(), read0.get_shape())
else:
self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
read0_v, read1_v, size0_v, size1_v = self.evaluate((read0, read1, size0,
size1))
      # Tests that the control dependencies were added and executed.
self.assertEqual(1, self.evaluate(v0))
self.assertEqual(1, self.evaluate(v1))
      # Tests that each TensorArray holds the correct values and size.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradYsInCorrectScope(self):
n_time = 1
n_dim = 1
x = constant_op.constant([[1.42]])
dy = constant_op.constant([[2.42]])
ta = tensor_array_ops.TensorArray(
dtypes.float32, size=n_time, element_shape=[n_dim])
for t in range(n_time):
ta = ta.write(index=t, value=x[t])
y = ta.stack()
# dy is outside of the gradients name scope; tf.gradients must
# wrap it in the correct name scope.
dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])
with self.cached_session(use_gpu=True) as sess:
vdx, vdy = self.evaluate([dx, dy])
self.assertAllClose(vdx, vdy)
def testSkipEagerTensorArrayInt64GPU(self):
if not test.is_gpu_available():
return
with self.session(use_gpu=True, force_gpu=True) as sess:
value = array_ops.placeholder(dtypes.int64)
ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2)
ta = ta.scatter([0, 1], value)
r0 = ta.read(0)
r1 = ta.read(1)
v0, v1 = sess.run([r0, r1], feed_dict={value: [-3, 100]})
self.assertAllEqual(v0, -3)
self.assertAllEqual(v1, 100)
if __name__ == "__main__":
test.main()
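# A rough standalone sketch (illustrative only, not part of the test suite
# above): the basic TensorArray write/read/stack pattern that these tests
# exercise, using the public tf.TensorArray API.
#
#   import tensorflow as tf
#   ta = tf.TensorArray(dtype=tf.float32, size=3)
#   ta = ta.write(0, 10.0).write(1, 20.0).write(2, 30.0)
#   ta.read(1)   # -> tf.Tensor(20.0, ...)
#   ta.stack()   # -> tf.Tensor([10. 20. 30.], ...)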
|
|
import json
from django.test.testcases import TransactionTestCase
from django_dynamic_fixture import G
from mock import patch, call
from entity_event.context_serializer import DefaultContextSerializer
from entity_event.tests.models import TestModel
class DefaultContextSerializerTests(TransactionTestCase):
def setUp(self):
super(DefaultContextSerializerTests, self).setUp()
# Create some fake context to work with
self.context = dict(
test='test'
)
# Create a serializer to test with
self.serializer = DefaultContextSerializer(self.context)
@patch.object(DefaultContextSerializer, 'serialize_value', autospec=True)
def test_data_property(self, mock_serialize_value):
# Call the property
response = self.serializer.data
# Assert that we have a proper response
self.assertEqual(response, mock_serialize_value.return_value)
@patch.object(DefaultContextSerializer, 'serialize_model', autospec=True)
@patch.object(DefaultContextSerializer, 'serialize_json_string', autospec=True)
@patch.object(DefaultContextSerializer, 'serialize_list', autospec=True)
@patch.object(DefaultContextSerializer, 'serialize_dict', autospec=True)
def test_serialize_value(self, *serialize_methods):
# Setup the return values of each method
for serialize_method in serialize_methods:
serialize_method.return_value = self.context
# Call the method
response = self.serializer.serialize_value(self.context)
# Assert we have a proper response
self.assertEqual(response, serialize_methods[0].return_value)
# Assert that each serialize method was called properly
for serialize_method in serialize_methods:
serialize_method.assert_called_once_with(self.serializer, self.context)
def test_serialize_model_non_model(self):
# Call the method
response = self.serializer.serialize_model('test')
# Assert we have a proper response
self.assertEqual(response, 'test')
def test_serialize_model(self):
# Create a model to test with
model = G(TestModel)
        # Fetch the model so we don't have the FKs loaded and only select one related
model = TestModel.objects.select_related('fk').get(id=model.id)
# Call the method
response = self.serializer.serialize_model(model)
        # Evaluate the fk_m2m because later Django versions return a queryset
response['fk_m2m'] = list(response['fk_m2m'])
# Assert that we have a proper response
self.assertEqual(
response,
{
'fk_m2m': [],
'fk2': model.fk2.id,
'fk': {
'id': model.fk.id,
'value': model.fk.value
},
'id': model.id,
'value': model.value
}
)
def test_serialize_json_string_non_string(self):
# Call the method
response = self.serializer.serialize_json_string(dict())
# Assert we have a proper response
self.assertEqual(response, dict())
def test_serialize_json_string_non_json_string(self):
# Call the method
response = self.serializer.serialize_json_string('test')
# Assert we have a proper response
self.assertEqual(response, 'test')
def test_serialize_json_string_bad_json_string(self):
# Call the method
response = self.serializer.serialize_json_string('{test')
# Assert we have a proper response
self.assertEqual(response, '{test')
def test_serialize_json_string(self):
# Create a json string to test
test_dict = dict(test='test')
test_json = json.dumps(test_dict)
# Call the method
response = self.serializer.serialize_json_string(test_json)
# Assert that we have a proper response
self.assertEqual(
response,
test_dict
)
def test_serialize_list_non_list(self):
# Call the method
response = self.serializer.serialize_list('test')
# Assert we have a proper response
self.assertEqual(response, 'test')
@patch.object(DefaultContextSerializer, 'serialize_value', autospec=True)
def test_serialize_list_list(self, mock_serialize_value):
# Setup a test list
test_list = ['one', 'two', 'three']
# Call the method
response = self.serializer.serialize_list(test_list)
# Assert that we have the proper response
self.assertEqual(
response,
[
mock_serialize_value.return_value,
mock_serialize_value.return_value,
mock_serialize_value.return_value,
]
)
        # Assert that we called serialize_value on all values of the list
self.assertEqual(
mock_serialize_value.mock_calls,
[
call(self.serializer, 'one'),
call(self.serializer, 'two'),
call(self.serializer, 'three'),
]
)
@patch.object(DefaultContextSerializer, 'serialize_value', autospec=True)
def test_serialize_list_tuple(self, mock_serialize_value):
# Setup a test tuple
test_tuple = ('one', 'two', 'three')
# Call the method
response = self.serializer.serialize_list(test_tuple)
# Assert that we have the proper response
self.assertEqual(
response,
[
mock_serialize_value.return_value,
mock_serialize_value.return_value,
mock_serialize_value.return_value,
]
)
        # Assert that we called serialize_value on all values of the tuple
self.assertEqual(
mock_serialize_value.mock_calls,
[
call(self.serializer, 'one'),
call(self.serializer, 'two'),
call(self.serializer, 'three'),
]
)
def test_serialize_dict_non_dict(self):
# Call the method
response = self.serializer.serialize_dict('test')
# Assert we have a proper response
self.assertEqual(response, 'test')
@patch.object(DefaultContextSerializer, 'serialize_value', autospec=True)
def test_serialize_dict(self, mock_serialize_value):
# Setup a test dict
test_dict = dict(one='one', two='two')
# Call the method
response = self.serializer.serialize_dict(test_dict)
# Assert we have a proper response
self.assertEqual(
response,
dict(
one=mock_serialize_value.return_value,
two=mock_serialize_value.return_value,
)
)
        # Assert that we called serialize_value on all values of the dict
mock_serialize_value.assert_has_calls([
call(self.serializer, 'one'),
call(self.serializer, 'two'),
], any_order=True)
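# A rough usage sketch inferred from the tests above (illustrative only; the
# exact traversal order is an implementation detail of DefaultContextSerializer):
#
#   context = {'payload': '{"a": 1}', 'items': ['x', 'y']}
#   DefaultContextSerializer(context).data
#   # -> {'payload': {'a': 1}, 'items': ['x', 'y']}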
|
|
#
# Unit Tests for the Task class
#
# Eric Jeschke ([email protected])
# Bruce Bon ([email protected]) 2007-08-31
#
import unittest
import time
import random
import logging
from ginga.misc import Task
import ginga.util.six as six
LOGDEBUG = True
# ========================================================================
class simpleTask(Task.Task):
"""Simple task used in various tests below. Sleeps a random interval
between 0 and 0.5 seconds, and then returns val.
"""
def __init__(self, val):
self.val = val
super(simpleTask, self).__init__()
def execute(self):
time.sleep(0.5*random.random())
return self.val
def make_SimpleTask(val):
"""Create a simpleTask object and return it."""
t = simpleTask(val)
return t
def make_CompoundTask(typeClass, prefix, num):
"""
Arguments:
typeClass Task.SequentialTaskset or Task.ConcurrentAndTaskset
prefix 'ct', 't2', 't3', etc.
num number of tasks in this compound task
Create num simpleTask objects in a list; create a compound task
object of type typeClass with taskseq = the list of tasks, and
return it.
"""
tasks = []
for i in range(num):
st = make_SimpleTask(prefix + '_' + str(i))
tasks.append(st)
t = typeClass(taskseq=tasks)
return t
class dynamicBuilderTask(Task.Task):
"""Dynamically builds and executes a sequential compound task.
"""
def __init__(self, num):
self.num = num
super(dynamicBuilderTask, self).__init__()
def execute(self):
t = make_CompoundTask(Task.SequentialTaskset, 'ct', self.num)
t.init_and_start(self)
res = t.wait()
return res
class stepTask(Task.Task):
"""Simple sequential task used in various tests below. Returns the result
of the last step.
  Implemented using Python generators, which is a less complex way to build a
  sequential task.
"""
def __init__(self):
self.count = 0
# Create generator for the task's sequential logic
self.gen = self.tasklogic()
super(stepTask, self).__init__()
def tasklogic(self):
"""This implements the task's logic as a simple sequential function.
"""
# e.g. This is the first step
self.count += 1
yield self.count
# e.g. Second step
self.count += 1
yield self.count
# e.g. Series of steps as an iteration
while self.count < 7:
yield self.count
self.count += 1
# e.g. Final step
self.count += 1
yield self.count
def step(self):
# Call generator for next step
return six.advance_iterator(self.gen)
def execute(self):
res = 0
try:
# Be careful that generator terminates or this will iterate forever
while True:
self.logger.debug("About to call step()")
res = self.step()
self.logger.debug("Result is %d" % (res))
except StopIteration:
# Raised when tasklogic() "runs off the end" (terminates)
pass
# Return final result
return res
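# Illustrative sketch (not part of the original test module; the helper name
# below is hypothetical): the same generator-driving pattern used by stepTask,
# shown without the Task machinery.
def _example_step_driver():
  """Drive a small generator to completion and return its final value."""
  def logic():
    count = 0
    # Three sequential "steps", each yielding the running count
    while count < 3:
      count += 1
      yield count
  result = None
  gen = logic()
  try:
    while True:
      result = next(gen)
  except StopIteration:
    # Raised when logic() runs off the end, just as in stepTask.execute()
    pass
  return result  # -> 3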
class TestTask01(unittest.TestCase):
def setUp(self):
"""
- Initialize logger
- Create 20-thread thread pool
- Make a fake parentTask using the thread pool
"""
self.logger = logging.getLogger('TestTask01Logger')
self.logger.setLevel(logging.DEBUG)
self.logger.debug("setting up thread pool")
self.tpool = Task.ThreadPool(numthreads=20, logger=self.logger)
self.tpool.startall(wait=True)
# Make a fake 'parent' task
self.parentTask = make_SimpleTask('t1')
self.parentTask.tag = 'tasks'
self.parentTask.logger = self.logger
self.parentTask.threadPool = self.tpool
def tearDown(self):
"""Stop all threads in pool"""
self.logger.debug("TestTask01: tearing down thread pool")
self.tpool.stopall(wait=True)
def test_01(self):
self.logger.debug("test of simple task creation and execution")
t = simpleTask('t1')
t.initialize(self.parentTask)
t.start()
res = t.wait()
self.assertEqual('t1', res)
def test_02(self):
self.logger.debug("test of a sequential (compound) task")
t = make_CompoundTask(Task.SequentialTaskset, 't2', 3)
t.init_and_start(self.parentTask)
res = t.wait()
self.logger.debug("res = %s" % (str(res)))
self.logger.debug("Total time is %f" % t.getExecutionTime())
self.assertEqual('t2_2', res)
def test_03(self):
self.logger.debug("test of a concurrent (compound) task")
t = make_CompoundTask(Task.ConcurrentAndTaskset, 't3', 3)
t.init_and_start(self.parentTask)
res = t.wait()
resTuple = ( t.taskseq[0].result, t.taskseq[1].result, t.taskseq[2].result )
self.logger.debug("resTuple = %s" % (str(resTuple)))
self.logger.debug("Total time is %f" % t.getExecutionTime())
# test against the values assigned in make_CompoundTask()
self.assertTrue('t3_1' in resTuple)
self.assertTrue('t3_0' in resTuple)
self.assertTrue('t3_2' in resTuple)
def test_04(self):
self.logger.debug("test of 2 seqential task sets in a concurrent task")
t1 = make_CompoundTask(Task.SequentialTaskset, 't4a', 3)
t2 = make_CompoundTask(Task.SequentialTaskset, 't4b', 3)
t = Task.ConcurrentAndTaskset([t1, t2])
t.init_and_start(self.parentTask)
res = t.wait()
resTuple = ( t1.result, t2.result )
self.logger.debug("resTuple = %s" % (str(resTuple)))
self.logger.debug("Total time is %f" % t.getExecutionTime())
# test against the values assigned to final task in each make_CompoundTask()
self.assertTrue('t4b_2' in resTuple)
self.assertTrue('t4a_2' in resTuple)
def test_05(self):
self.logger.debug("test of 2 seqential task sets in a sequential task")
t1 = make_CompoundTask(Task.SequentialTaskset, 't5a', 3)
t2 = make_CompoundTask(Task.SequentialTaskset, 't5b', 3)
t = Task.SequentialTaskset([t1, t2])
t.init_and_start(self.parentTask)
res = t.wait()
resTuple = ( t1.result, t2.result )
self.logger.debug("resTuple = %s" % (str(resTuple)))
self.logger.debug("Total time is %f" % t.getExecutionTime())
self.assertEqual('t5b_2', res)
# test against the values assigned in make_CompoundTask()
self.assertEqual('t5a_2', resTuple[0])
self.assertEqual('t5b_2', resTuple[1])
def test_06(self):
self.logger.debug("test of 2 concurrent tasks in a concurrent task")
t1 = make_CompoundTask(Task.ConcurrentAndTaskset, 't6a', 3)
t2 = make_CompoundTask(Task.ConcurrentAndTaskset, 't6b', 3)
t = Task.ConcurrentAndTaskset([t1, t2])
t.init_and_start(self.parentTask)
res = t.wait()
resTuple = ( t1.taskseq[0].result, t1.taskseq[1].result, t1.taskseq[2].result,
t2.taskseq[0].result, t2.taskseq[1].result, t2.taskseq[2].result )
self.logger.debug("resTuple = %s" % (str(resTuple)))
self.logger.debug("Total time is %f" % t.getExecutionTime())
self.assertTrue( t.taskseq[0].result in ('t6a_0', 't6a_1', 't6a_2'))
self.assertTrue( t.taskseq[1].result in ('t6b_0', 't6b_1', 't6b_2'))
# test against the values assigned in make_CompoundTask()
self.assertEqual( 't6a_0', resTuple[0] )
self.assertEqual( 't6a_1', resTuple[1] )
self.assertEqual( 't6a_2', resTuple[2] )
self.assertEqual( 't6b_0', resTuple[3] )
self.assertEqual( 't6b_1', resTuple[4] )
self.assertEqual( 't6b_2', resTuple[5] )
def test_07(self):
self.logger.debug("test of simple step task")
t = stepTask()
t.init_and_start(self.parentTask)
res = t.wait()
self.logger.debug("Total time is %f" % t.getExecutionTime())
self.assertEqual(8, res)
def test_08(self):
self.logger.debug("test of dynamically built task")
t = dynamicBuilderTask(5)
t.init_and_start(self.parentTask)
res = t.wait()
self.logger.debug("res = %s" % (str(res)))
self.logger.debug("Total time is %f" % t.getExecutionTime())
self.assertEqual('ct_4', res)
if __name__ == "__main__":
unittest.main()
#END
|
|
################################################################################
# Copyright (c) 2011-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Class for concatenating visibility data sets."""
import itertools
from functools import reduce
import numpy as np
from .categorical import (CategoricalData, concatenate_categorical,
unique_in_order)
from .dataset import DataSet
from .lazy_indexer import LazyIndexer
from .sensordata import (SensorCache, SensorData, SensorGetter,
dummy_sensor_getter)
class ConcatenationError(Exception):
"""Sequence of objects could not be concatenated due to incompatibility."""
# -------------------------------------------------------------------------------------------------
# -- CLASS : ConcatenatedLazyIndexer
# -------------------------------------------------------------------------------------------------
class ConcatenatedLazyIndexer(LazyIndexer):
"""Two-stage deferred indexer that concatenates multiple indexers.
This indexer concatenates a sequence of indexers along the first (i.e. time)
axis. The index specification is broken down into chunks along this axis,
sent to the applicable underlying indexers and the returned data are
concatenated again before returning it.
Parameters
----------
indexers : sequence of :class:`LazyIndexer` objects and/or arrays
Sequence of indexers or raw arrays to be concatenated
transforms : list of :class:`LazyTransform` objects or None, optional
Extra chain of transforms to be applied to data after final indexing
Attributes
----------
name : string
Name of first non-empty indexer (or empty string otherwise)
Raises
------
InvalidTransform
If transform chain does not obey restrictions on changing the data shape
"""
def __init__(self, indexers, transforms=None):
# Only keep those indexers that have any data selected on first axis (unless nothing at all is selected)
self.indexers = [indexer for indexer in indexers if indexer.shape[0]]
if not self.indexers:
self.indexers = indexers[:1]
# Wrap any raw array in the sequence in a LazyIndexer (slower but more compatible)
for n, indexer in enumerate(self.indexers):
self.indexers[n] = indexer if isinstance(indexer, LazyIndexer) else LazyIndexer(indexer)
self.transforms = [] if transforms is None else transforms
# Pick the first non-empty indexer name as overall name, or failing that, an empty string
names = unique_in_order([indexer.name for indexer in self.indexers if indexer.name])
self.name = (names[0] + ' etc.') if len(names) > 1 else names[0] if len(names) == 1 else ''
# Test validity of shape and dtype
self.shape, self.dtype
def __str__(self):
"""Verbose human-friendly string representation of lazy indexer object."""
shape, dtype = self._initial_shape, self._initial_dtype
descr = [self._name_shape_dtype(self.name, shape, dtype)]
for n, indexer in enumerate(self.indexers):
indexer_descr = str(indexer).split('\n')
descr += [f'- Indexer {n:03d}: ' + indexer_descr[0]]
descr += [' ' + indescr for indescr in indexer_descr[1:]]
for transform in self.transforms:
shape, dtype = transform.new_shape(shape), transform.dtype if transform.dtype is not None else dtype
descr += ['-> ' + self._name_shape_dtype(transform.name, shape, dtype)]
return '\n'.join(descr)
def __getitem__(self, keep):
"""Extract a concatenated array from the underlying indexers.
This applies the given second-stage (global) index on top of the existing
first-stage (local) indices of each indexer, and concatenates the arrays
extracted from each indexer along the first dimension.
Parameters
----------
keep : tuple of int or slice or sequence of int or sequence of bool
Second-stage (global) index as a valid index or slice specification
(supports arbitrary slicing or advanced indexing on any dimension)
Returns
-------
data : array
Concatenated output array
"""
ndim = len(self._initial_shape)
# Ensure that keep is a tuple (then turn it into a list to simplify further processing)
keep = list(keep) if isinstance(keep, tuple) else [keep]
# The original keep tuple will be passed to data transform chain
original_keep = tuple(keep)
# Ensure that keep is same length as first-stage data shape (truncate or pad with blanket slices as necessary)
keep = keep[:ndim] + [slice(None)] * (ndim - len(keep))
keep_head, keep_tail = keep[0], keep[1:]
# Figure out the final shape on the fixed tail dimensions
shape_tails = [len(np.atleast_1d(np.arange(dim_len)[dim_keep]))
for dim_keep, dim_len in zip(keep[1:], self._initial_shape[1:])]
indexer_starts = np.cumsum([0] + [len(indexer) for indexer in self.indexers[:-1]])
def find_indexer(index):
return indexer_starts.searchsorted(index, side='right') - 1
# Interpret selection on first dimension, along which data will be concatenated
if np.isscalar(keep_head):
# If selection is a scalar, pass directly to appropriate indexer (after removing offset)
keep_head = len(self) + keep_head if keep_head < 0 else keep_head
ind = find_indexer(keep_head)
out_data = self.indexers[ind][tuple([keep_head - indexer_starts[ind]] + keep_tail)]
elif isinstance(keep_head, slice):
# If selection is a slice, split it into smaller slices that span individual indexers
# Start by normalising slice to full first-stage range
start, stop, stride = keep_head.indices(len(self))
chunks = []
# Step through indexers that overlap with slice (it's guaranteed that some will overlap)
for ind in range(find_indexer(start), find_indexer(stop) + 1):
chunk_start = start - indexer_starts[ind] \
if start >= indexer_starts[ind] else ((start - indexer_starts[ind]) % stride)
chunk_stop = stop - indexer_starts[ind]
# The final .reshape is needed to upgrade any scalar or singleton chunks to full dimension
chunks.append(self.indexers[ind][tuple([slice(chunk_start, chunk_stop, stride)] +
keep_tail)].reshape(tuple([-1] + shape_tails)))
out_data = np.concatenate(chunks)
else:
# Anything else is advanced indexing via bool or integer sequences
keep_head = np.atleast_1d(keep_head)
# A boolean mask is simpler to handle (no repeated or out-of-order indexing) - partition mask over indexers
if keep_head.dtype == np.bool and len(keep_head) == len(self):
chunks = []
for ind in range(len(self.indexers)):
chunk_start = indexer_starts[ind]
chunk_stop = indexer_starts[ind + 1] if ind < len(indexer_starts) - 1 else len(self)
chunks.append(self.indexers[ind][tuple([keep_head[chunk_start:chunk_stop]] +
keep_tail)].reshape(tuple([-1] + shape_tails)))
out_data = np.concatenate(chunks)
else:
# Form sequence of relevant indexer indices and local data indices with indexer offsets removed
indexers = find_indexer(keep_head)
local_indices = keep_head - indexer_starts[indexers]
# Determine output data shape after second-stage selection
final_shape = [len(np.atleast_1d(np.arange(dim_len)[dim_keep]))
for dim_keep, dim_len in zip(keep, self._initial_shape)]
out_data = np.empty(final_shape, dtype=self.dtype)
for ind in range(len(self.indexers)):
chunk_mask = (indexers == ind)
# Insert all selected data originating from same indexer into final array
if chunk_mask.any():
out_data[chunk_mask] = self.indexers[ind][tuple([local_indices[chunk_mask]] + keep_tail)]
# Apply transform chain to output data, if any
return reduce(lambda data, transform: transform(data, original_keep), self.transforms, out_data)
@property
def _initial_shape(self):
"""Shape of data array after first-stage indexing and before transformation."""
# Each component must have the same shape except for the first dimension (length)
# The overall length will be the sum of component lengths
shape_tails = {indexer.shape[1:] for indexer in self.indexers}
if len(shape_tails) != 1:
raise ConcatenationError("Incompatible shapes among sub-indexers making up indexer '%s':\n%s" %
(self.name, '\n'.join([repr(indexer) for indexer in self.indexers])))
return tuple([np.sum([len(indexer) for indexer in self.indexers])] + list(shape_tails.pop()))
@property
def _initial_dtype(self):
"""Type of data array before transformation."""
# Each component must have the same dtype, which becomes the overall dtype
dtypes = {indexer.dtype for indexer in self.indexers}
if len(dtypes) == 1:
return dtypes.pop()
elif np.all([np.issubdtype(dtype, np.string_) for dtype in dtypes]):
# Strings of different lengths have different dtypes (e.g. '|S1' vs '|S10') but can be safely concatenated
return np.dtype('|S{}'.format(max([dt.itemsize for dt in dtypes])))
else:
raise ConcatenationError(f"Incompatible dtypes among sub-indexers making up indexer '{self.name}':\n"
+ '\n'.join([repr(indexer) for indexer in self.indexers]))
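# A rough usage sketch (illustrative only; the helper name is hypothetical):
# concatenating two plain arrays through ConcatenatedLazyIndexer is intended
# to behave like np.concatenate along the first (time) axis.
def _example_concatenated_indexer():
    a = np.arange(6).reshape(3, 2)
    b = np.arange(6, 10).reshape(2, 2)
    stacked = ConcatenatedLazyIndexer([a, b])
    # Selecting everything in the second stage should match a plain concatenation
    return np.array_equal(stacked[:], np.concatenate([a, b]))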
# -------------------------------------------------------------------------------------------------
# -- CLASS : ConcatenatedSensorGetter
# -------------------------------------------------------------------------------------------------
def common_dtype(sensor_data_sequence):
"""The dtype suitable to store all sensor data values in the given sequence.
This extracts the dtypes of a sequence of sensor data objects and finds the
minimal dtype to which all of them may be safely cast using NumPy type
promotion rules (which will typically be the dtype of a concatenation of
the values).
Parameters
----------
sensor_data_sequence : sequence of extracted sensor data objects
These objects may include :class:`numpy.ndarray` and :class:`CategoricalData`
Returns
-------
dtype : :class:`numpy.dtype` object
The promoted dtype of the sequence, or None if `sensor_data_sequence` is empty
"""
dtypes = [sd.dtype for sd in sensor_data_sequence]
# Find resulting dtype through type promotion or give up if nothing is known
return np.result_type(*dtypes) if dtypes else None
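# Illustrative note (not part of the original module): common_dtype relies on
# NumPy type promotion, e.g. np.result_type(np.int32, np.int64) gives int64 and
# np.result_type(np.float32, np.float64) gives float64, so mixing sensor pieces
# of different numeric precision yields the widest common dtype.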
class ConcatenatedSensorGetter(SensorGetter):
"""The concatenation of multiple raw (uncached) sensor data sets.
This is a convenient container for returning raw (uncached) sensor data sets
from a :class:`ConcatenatedSensorCache` object. It only accesses the
underlying data sets when explicitly asked to via the :meth:`get` interface,
but provides quick access to metadata such as sensor name.
Parameters
----------
data : sequence of :class:`SensorGetter`
Uncached sensor data
"""
def __init__(self, data):
names = unique_in_order([sd.name for sd in data])
if len(names) != 1:
# XXX This is probably not a serious restriction; consider removal.
# It is a weak verification that we are combining like sensors,
# but underlying names may legitimately differ for datasets of
# different minor versions (even within the same version...).
raise ConcatenationError('Cannot concatenate sensor with different '
f'underlying names: {names}')
super().__init__(names[0])
self._data = data
def get(self):
parts = [sd.get() for sd in self._data]
# Filter out empty sensors, because they may have a default dtype that
# will skew the dtype of the concatenation
parts = [part for part in parts if part]
if not parts:
timestamp = np.array([])
value = np.array([])
status = np.array([], dtype='S7')
else:
timestamp = np.concatenate([part.timestamp for part in parts])
value = np.concatenate([part.value for part in parts])
if all(part.status is not None for part in parts):
status = np.concatenate([part.status for part in parts])
else:
status = None
return SensorData(self.name, timestamp, value, status)
# -------------------------------------------------------------------------------------------------
# -- CLASS : ConcatenatedSensorCache
# -------------------------------------------------------------------------------------------------
class ConcatenatedSensorCache(SensorCache):
"""Sensor cache that is a concatenation of multiple underlying caches.
This concatenates a sequence of sensor caches along the time axis and makes
them appear like a single sensor cache. The combined cache contains a
superset of all actual and virtual sensors found in the underlying caches
and replaces any missing sensor data with dummy values.
Parameters
----------
caches : sequence of :class:`SensorCache` objects
Sequence of underlying caches to be concatenated
keep : sequence of bool, optional
Default (global) time selection specification as boolean mask that will
be applied to sensor data (this can be disabled on data retrieval)
"""
def __init__(self, caches, keep=None):
self.caches = caches
# Collect all virtual sensors in caches as well as properties.
virtual, self.props = {}, {}
for cache in caches:
virtual.update(cache.virtual)
self.props.update(cache.props)
self.virtual = virtual
timestamps = [cache.timestamps for cache in caches]
if np.all([isinstance(ts, LazyIndexer) for ts in timestamps]):
self.timestamps = ConcatenatedLazyIndexer(timestamps)
else:
self.timestamps = np.concatenate([ts[:] for ts in timestamps])
self._segments = np.cumsum([0] + [len(cache.timestamps) for cache in caches])
self._set_keep(keep)
def _set_keep(self, keep=None):
"""Set time selection for sensor values.
Parameters
----------
keep : array of bool, shape (*T*,), optional
Boolean selection mask with one entry per timestamp
"""
if keep is not None:
# Save top-level / global boolean selection mask and let each cache.keep be a view into this array
self.keep = keep
for n, cache in enumerate(self.caches):
cache._set_keep(keep[self._segments[n]:self._segments[n + 1]])
def _get(self, name, **kwargs):
"""Extract sensor data from multiple caches (see :meth:`get` for docs).
This extracts a sequence of sensor data objects, one from each cache.
For caches which do not contain the sensor it returns `None`.
"""
# First extract from all caches where the requested sensor is present
split_data = []
for cache in self.caches:
try:
sensor_data = cache.get(name, **kwargs)
except KeyError:
split_data.append(None)
else:
split_data.append(sensor_data)
return split_data
def get(self, name, select=False, extract=True, **kwargs):
"""Sensor values interpolated to correlator data timestamps.
Retrieve raw (uncached) or cached sensor data from each underlying cache
and concatenate the results along the time axis.
Parameters
----------
name : string
Sensor name
select : {False, True}, optional
True if preset time selection will be applied to returned data
extract : {True, False}, optional
True if sensor data should be extracted from store and cached
kwargs : dict, optional
Additional parameters are passed to underlying sensor caches
Returns
-------
data : array or :class:`CategoricalData` or :class:`SensorGetter` object
If extraction is disabled, this will be a :class:`SensorGetter` object
for uncached sensors. If selection is enabled, this will be a 1-D
array of values, one per selected timestamp. If selection is
disabled, this will be a 1-D array of values (of the same length as
the :attr:`timestamps` attribute) for numerical data, and a
:class:`CategoricalData` object for categorical data.
Raises
------
KeyError
If sensor name was not found in cache and did not match virtual template
"""
# Get array, categorical data or raw sensor data from each cache
split_data = self._get(name, select=select, extract=extract, **kwargs)
if all(sd is None for sd in split_data):
raise KeyError(f'Key {name} not found in any of the concatenated datasets')
        # If this sensor has already been partially extracted,
        # we are forced to extract it in the rest of the caches too
if not extract and not all(sd is None or isinstance(sd, SensorGetter) for sd in split_data):
extract = True
split_data = self._get(name, select=select, extract=extract, **kwargs)
if not extract:
# Just discard pieces for which the sensor is missing.
split_data = [sd for sd in split_data if sd is not None]
return ConcatenatedSensorGetter(split_data)
props = self._get_props(name, self.props, **kwargs)
if any(sd is None for sd in split_data):
# This should not typically happen, and it needs a slow path to
# figure out the right dummy value. We put the dummy values back
# into the cache so that this isn't needed next time.
if select:
split_data2 = self._get(name, select=False, extract=True, **kwargs)
else:
split_data2 = split_data
split_data2 = [sd for sd in split_data2 if sd is not None]
dtype = common_dtype(split_data2)
dummy = dummy_sensor_getter(name, value=props.get('initial_value'), dtype=dtype)
for i, cache in enumerate(self.caches):
if split_data[i] is None:
cache[name] = self._extract(dummy, cache.timestamps, cache.dump_period, **props)
split_data[i] = cache.get(name, select=select, extract=True, **kwargs)
if any(isinstance(sd, CategoricalData) for sd in split_data):
return concatenate_categorical(split_data, **props)
else:
# Keep arrays as arrays and lists as lists to avoid dtype issues
if any(isinstance(sd, np.ndarray) for sd in split_data):
return np.concatenate(split_data)
else:
return sum(split_data, [])
def __setitem__(self, name, data):
"""Assign data to sensor, splitting it across underlying caches.
Parameters
----------
name : string
Sensor name
data : array or :class:`CategoricalData`
Data to be assigned to sensor
"""
# Split data into segments and setitem to each cache
if isinstance(data, CategoricalData):
split_data = data.partition(self._segments)
for n, cache in enumerate(self.caches):
cache[name] = split_data[n]
else:
for n, cache in enumerate(self.caches):
cache[name] = data[self._segments[n]:self._segments[n + 1]]
def __delitem__(self, name):
found = False
for cache in self.caches:
try:
del cache[name]
found = True
except KeyError:
pass
if not found:
raise KeyError(name)
def __contains__(self, name):
return any(name in cache for cache in self.caches)
def __len__(self):
return sum(1 for _ in self)
def __iter__(self):
"""Key iterator that iterates through sensor names."""
seen = set()
for cache in self.caches:
for key in cache:
if key not in seen:
seen.add(key)
yield key
# -------------------------------------------------------------------------------------------------
# -- CLASS : ConcatenatedDataSet
# -------------------------------------------------------------------------------------------------
class ConcatenatedDataSet(DataSet):
"""Class that concatenates existing visibility data sets.
This provides a single DataSet interface to a list of concatenated data sets.
Where possible, identical targets, subarrays, spectral windows and
observation sensors are merged. For more information on attributes, see the
:class:`DataSet` docstring.
Parameters
----------
datasets : sequence of :class:`DataSet` objects
List of existing data sets
"""
def __init__(self, datasets):
DataSet.__init__(self, '', datasets[0].ref_ant, datasets[0].time_offset)
# Sort data sets in chronological order via 'decorate-sort-undecorate' (DSU) idiom
decorated_datasets = [(d.start_time, d) for d in datasets]
decorated_datasets.sort()
self.datasets = datasets = [d[-1] for d in decorated_datasets]
# Merge high-level metadata
self.name = ','.join(unique_in_order([d.name for d in datasets]))
self.url = ' | '.join(unique_in_order([d.url for d in datasets]))
self.version = ','.join(unique_in_order([d.version for d in datasets]))
self.observer = ','.join(unique_in_order([d.observer for d in datasets]))
self.description = ' | '.join(unique_in_order([d.description for d in datasets]))
self.experiment_id = ','.join(unique_in_order([d.experiment_id for d in datasets]))
obs_params = unique_in_order(reduce(lambda x, y: x + y, [list(d.obs_params.keys()) for d in datasets]))
for param in obs_params:
values = [d.obs_params.get(param, '') for d in datasets]
# If all values are the same, extract the unique value from the list; otherwise keep the list
# The itertools.groupby function should work on any value, even unhashable and unorderable ones
self.obs_params[param] = values[0] if len([k for k in itertools.groupby(values)]) == 1 else values
rx_ants = unique_in_order(reduce(lambda x, y: x + y, [list(d.receivers.keys()) for d in datasets]))
for ant in rx_ants:
rx = [d.receivers.get(ant, '') for d in datasets]
self.receivers[ant] = rx[0] if len([k for k in itertools.groupby(rx)]) == 1 else rx
dump_periods = unique_in_order([d.dump_period for d in datasets])
if len(dump_periods) > 1:
raise ConcatenationError('Data sets cannot be concatenated because of differing dump periods: ' +
', '.join(f'{dp:g}' for dp in dump_periods))
self.dump_period = dump_periods[0]
self._segments = np.cumsum([0] + [len(d.sensor.timestamps) for d in datasets])
# Keep main time selection mask at top level and ensure that underlying datasets use slice views of main one
self._set_keep(time_keep=np.ones(self._segments[-1], dtype=np.bool))
self.start_time = min([d.start_time for d in datasets])
self.end_time = max([d.end_time for d in datasets])
self.sensor = ConcatenatedSensorCache([d.sensor for d in datasets], keep=self._time_keep)
subarray = self.sensor.get('Observation/subarray')
spw = self.sensor.get('Observation/spw')
target = self.sensor.get('Observation/target')
self.subarrays = subarray.unique_values
self.spectral_windows = spw.unique_values
self.catalogue.add(target.unique_values)
self.catalogue.antenna = self.sensor[f'Antennas/{self.ref_ant}/antenna'][0]
split_sub = subarray.partition(self._segments)
split_spw = spw.partition(self._segments)
split_target = target.partition(self._segments)
        # Fix index sensors in underlying datasets: scan / compscan indices run on
        # across datasets, while the rest are remapped to merged values
scan_start, compscan_start = 0, 0
for n, d in enumerate(datasets):
d.sensor['Observation/subarray'] = split_sub[n]
d.sensor['Observation/subarray_index'] = CategoricalData(split_sub[n].indices, split_sub[n].events)
d.sensor['Observation/spw'] = split_spw[n]
d.sensor['Observation/spw_index'] = CategoricalData(split_spw[n].indices, split_spw[n].events)
d.sensor['Observation/target'] = split_target[n]
d.sensor['Observation/target_index'] = CategoricalData(split_target[n].indices, split_target[n].events)
scan_index = d.sensor.get('Observation/scan_index')
scan_index.unique_values = [index + scan_start for index in scan_index.unique_values]
scan_start += len(scan_index.unique_values)
d.sensor['Observation/scan_index'] = scan_index
compscan_index = d.sensor.get('Observation/compscan_index')
compscan_index.unique_values = [index + compscan_start for index in compscan_index.unique_values]
compscan_start += len(compscan_index.unique_values)
d.sensor['Observation/compscan_index'] = compscan_index
# Apply default selection and initialise all members that depend on selection in the process
self.select(spw=0, subarray=0)
def _set_keep(self, time_keep=None, freq_keep=None, corrprod_keep=None,
weights_keep=None, flags_keep=None):
"""Set time, frequency and/or correlation product selection masks.
Set the selection masks for those parameters that are present. The time
mask is split into chunks and applied to the underlying datasets and
sensor caches, while the frequency and corrprod masks are directly
applied to the underlying datasets as well. Also allow for weights
and flags selections.
Parameters
----------
time_keep : array of bool, shape (*T*,), optional
Boolean selection mask with one entry per timestamp
freq_keep : array of bool, shape (*F*,), optional
Boolean selection mask with one entry per frequency channel
corrprod_keep : array of bool, shape (*B*,), optional
Boolean selection mask with one entry per correlation product
weights_keep : 'all' or string or sequence of strings, optional
Names of selected weight types (or 'all' for the lot)
flags_keep : 'all' or string or sequence of strings, optional
Names of selected flag types (or 'all' for the lot)
"""
super()._set_keep(time_keep, freq_keep, corrprod_keep, weights_keep, flags_keep)
for n, d in enumerate(self.datasets):
d._set_keep(time_keep=self._time_keep[self._segments[n]:self._segments[n + 1]],
freq_keep=self._freq_keep,
corrprod_keep=self._corrprod_keep,
weights_keep=self._weights_keep,
flags_keep=self._flags_keep)
@property
def timestamps(self):
"""Visibility timestamps in UTC seconds since Unix epoch.
The timestamps are returned as an array indexer of float64, shape
(*T*,), with one timestamp per integration aligned with the integration
*midpoint*. To get the data array itself from the indexer `x`, do `x[:]`
or perform any other form of selection on it.
"""
return ConcatenatedLazyIndexer([d.timestamps for d in self.datasets])
@property
def vis(self):
"""Complex visibility data as a function of time, frequency and baseline.
The visibility data are returned as an array indexer of complex64, shape
(*T*, *F*, *B*), with time along the first dimension, frequency along the
second dimension and correlation product ("baseline") index along the
third dimension. The number of integrations *T* matches the length of
:meth:`timestamps`, the number of frequency channels *F* matches the
length of :meth:`freqs` and the number of correlation products *B*
matches the length of :meth:`corr_products`. To get the data array
itself from the indexer `x`, do `x[:]` or perform any other form of
selection on it.
"""
return ConcatenatedLazyIndexer([d.vis for d in self.datasets])
@property
def weights(self):
"""Visibility weights as a function of time, frequency and baseline.
The weights data are returned as an array indexer of float32, shape
(*T*, *F*, *B*), with time along the first dimension, frequency along the
second dimension and correlation product ("baseline") index along the
third dimension. The number of integrations *T* matches the length of
:meth:`timestamps`, the number of frequency channels *F* matches the
length of :meth:`freqs` and the number of correlation products *B*
matches the length of :meth:`corr_products`. To get the data array
itself from the indexer `x`, do `x[:]` or perform any other form of
indexing on it. Only then will data be loaded into memory.
"""
return ConcatenatedLazyIndexer([d.weights for d in self.datasets])
@property
def flags(self):
"""Flags as a function of time, frequency and baseline.
The flags data are returned as an array indexer of bool, shape
(*T*, *F*, *B*), with time along the first dimension, frequency along the
second dimension and correlation product ("baseline") index along the
third dimension. The number of integrations *T* matches the length of
:meth:`timestamps`, the number of frequency channels *F* matches the
length of :meth:`freqs` and the number of correlation products *B*
matches the length of :meth:`corr_products`. To get the data array
itself from the indexer `x`, do `x[:]` or perform any other form of
indexing on it. Only then will data be loaded into memory.
"""
return ConcatenatedLazyIndexer([d.flags for d in self.datasets])
@property
def temperature(self):
"""Air temperature in degrees Celsius."""
return np.concatenate([d.temperature for d in self.datasets])
@property
def pressure(self):
"""Barometric pressure in millibars."""
return np.concatenate([d.pressure for d in self.datasets])
@property
def humidity(self):
"""Relative humidity as a percentage."""
return np.concatenate([d.humidity for d in self.datasets])
@property
def wind_speed(self):
"""Wind speed in metres per second."""
return np.concatenate([d.wind_speed for d in self.datasets])
@property
def wind_direction(self):
"""Wind direction as an azimuth angle in degrees."""
return np.concatenate([d.wind_direction for d in self.datasets])
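# A rough usage sketch (illustrative only; file names are hypothetical and
# katdal.open is assumed to return DataSet objects as in the katdal package):
#
#   import katdal
#   d1 = katdal.open('obs_part1.h5')
#   d2 = katdal.open('obs_part2.h5')
#   combined = ConcatenatedDataSet([d1, d2])
#   combined.select(scans='track')
#   vis = combined.vis[:]   # visibilities spanning both input data sets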
|
|
from ...core import AtomicExpr, Expr, Integer, Symbol, Tuple, sympify
from ...core.assumptions import StdFactKB
from ...core.decorators import _sympifyit, call_highest_priority
from ...core.logic import fuzzy_bool
from ...functions import adjoint, conjugate
from ...logic import false
from ...simplify import simplify
from ..matrices import ShapeError
class MatrixExpr(Expr):
"""Superclass for Matrix Expressions
MatrixExprs represent abstract matrices, linear transformations represented
within a particular basis.
Examples
========
>>> A = MatrixSymbol('A', 3, 3)
>>> y = MatrixSymbol('y', 3, 1)
>>> x = (A.T*A).inverse() * A * y
See Also
========
MatrixSymbol
MatAdd
MatMul
Transpose
Inverse
"""
_op_priority = 11.0
is_Matrix = True
is_MatrixExpr = True
is_Identity = None
is_Inverse = False
is_Transpose = False
is_ZeroMatrix = False
is_MatAdd = False
is_MatMul = False
def __new__(cls, *args, **kwargs):
args = map(sympify, args)
return Expr.__new__(cls, *args, **kwargs)
# The following is adapted from the core Expr object
def __neg__(self):
from .matmul import MatMul
return MatMul(-1, self).doit()
def __abs__(self):
raise NotImplementedError
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
from .matadd import MatAdd
return MatAdd(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
from .matadd import MatAdd
return MatAdd(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
from .matadd import MatAdd
return MatAdd(self, -other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
from .matadd import MatAdd
return MatAdd(other, -self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
from .matmul import MatMul
return MatMul(self, other).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
from .matmul import MatMul
return MatMul(other, self).doit()
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
from .inverse import Inverse
from .matpow import MatPow
if not self.is_square:
raise ShapeError(f'Power of non-square matrix {self}')
elif self.is_Identity:
return self
elif other == -1:
return Inverse(self)
elif other == 0:
return Identity(self.rows)
elif other == 1:
return self
return MatPow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other): # pragma: no cover
raise NotImplementedError('Matrix Power not defined')
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self * other**Integer(-1)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__truediv__')
def __rtruediv__(self, other):
raise NotImplementedError()
# return MatMul(other, Pow(self, -1))
@property
def rows(self):
return self.shape[0]
@property
def cols(self):
return self.shape[1]
@property
def is_square(self):
return self.rows == self.cols
def _eval_conjugate(self):
from .adjoint import Adjoint
from .transpose import Transpose
return Adjoint(Transpose(self))
def _eval_inverse(self):
from .inverse import Inverse
return Inverse(self)
def _eval_transpose(self):
from .transpose import Transpose
return Transpose(self)
def _eval_power(self, exp):
from .matpow import MatPow
return MatPow(self, exp)
def _eval_simplify(self, **kwargs):
if self.is_Atom:
return self
else:
return self.__class__(*[simplify(x, **kwargs) for x in self.args])
def _eval_adjoint(self):
from .adjoint import Adjoint
return Adjoint(self)
def _entry(self, i, j): # pragma: no cover
raise NotImplementedError('Indexing not implemented '
f'for {self.__class__.__name__}')
def adjoint(self):
return adjoint(self)
def conjugate(self):
return conjugate(self)
def transpose(self):
from .transpose import transpose
return transpose(self)
T = property(transpose, None, None, 'Matrix transposition.')
def inverse(self):
return self._eval_inverse()
def valid_index(self, i, j):
def is_valid(idx):
return isinstance(idx, (int, Integer, Symbol, Expr))
return (is_valid(i) and is_valid(j) and
(0 <= i) != false and (i < self.rows) != false and
(0 <= j) != false and (j < self.cols) != false)
def __getitem__(self, key):
if not isinstance(key, tuple) and isinstance(key, slice):
from .slice import MatrixSlice
return MatrixSlice(self, key, (0, None, 1))
if isinstance(key, tuple) and len(key) == 2:
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
from .slice import MatrixSlice
return MatrixSlice(self, i, j)
i, j = sympify(i), sympify(j)
if self.valid_index(i, j) is not False:
return self._entry(i, j)
else:
raise IndexError(f'Invalid indices ({i}, {j})')
elif isinstance(key, (int, Integer)):
# row-wise decomposition of matrix
rows, cols = self.shape
if not (isinstance(rows, Integer) and isinstance(cols, Integer)):
raise IndexError('Single index only supported for '
'non-symbolic matrix shapes.')
key = sympify(key)
i = key // cols
j = key % cols
if self.valid_index(i, j) is not False:
return self._entry(i, j)
else:
raise IndexError(f'Invalid index {key}')
elif isinstance(key, (Symbol, Expr)):
raise IndexError('Single index only supported for '
'non-symbolic indices.')
raise IndexError(f'Invalid index, wanted {self}[i,j]')
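    # Example (illustrative): for A = MatrixSymbol('A', 3, 4), a single integer
    # index is decomposed row-wise, so A[5] resolves to the same entry as
    # A[5 // 4, 5 % 4], i.e. A[1, 1].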
def as_explicit(self):
"""
Returns a dense Matrix with elements represented explicitly
Returns an object of type ImmutableMatrix.
Examples
========
>>> I = Identity(3)
>>> I
I
>>> I.as_explicit()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_mutable: returns mutable Matrix type
"""
from ..immutable import ImmutableMatrix
return ImmutableMatrix([[ self[i, j]
for j in range(self.cols)]
for i in range(self.rows)])
def as_mutable(self):
"""
Returns a dense, mutable matrix with elements represented explicitly
Examples
========
>>> I = Identity(3)
>>> I
I
>>> I.shape
(3, 3)
>>> I.as_mutable()
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
as_explicit: returns ImmutableMatrix
"""
return self.as_explicit().as_mutable()
def __array__(self):
from numpy import empty
a = empty(self.shape, dtype=object)
for i in range(self.rows):
for j in range(self.cols):
a[i, j] = self[i, j]
return a
def equals(self, other):
"""
Test elementwise equality between matrices, potentially of different
types
>>> Identity(3).equals(eye(3))
True
"""
if all(x.is_Integer for x in self.shape):
return self.as_explicit().equals(other)
def canonicalize(self):
return self
def as_coeff_mmul(self):
from .matmul import MatMul
return 1, MatMul(self)
class MatrixElement(Expr):
"""Element of the matrix expression."""
parent = property(lambda self: self.args[0])
i = property(lambda self: self.args[1])
j = property(lambda self: self.args[2])
_diff_wrt = True
def __new__(cls, name, n, m):
n, m = map(sympify, (n, m))
from .. import MatrixBase
if isinstance(name, MatrixBase):
if n.is_Integer and m.is_Integer:
return name[n, m]
name = sympify(name)
return Expr.__new__(cls, name, n, m)
def xreplace(self, rule):
if self in rule:
return rule[self]
else:
return self
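# A short sketch of MatrixElement behaviour (assuming the explicit ``Matrix``
# class from the package is available): an explicit parent with integer
# indices collapses to the concrete entry, while a symbolic parent stays
# unevaluated.
#
#     >>> MatrixElement(Matrix([[1, 2], [3, 4]]), 1, 0)
#     3
#     >>> A = MatrixSymbol('A', 3, 3)
#     >>> MatrixElement(A, 0, 2)
#     A[0, 2]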
class MatrixSymbol(MatrixExpr, AtomicExpr):
"""Symbolic representation of a Matrix object
Creates a Diofant Symbol to represent a Matrix. This matrix has a shape and
    can be included in Matrix Expressions.
>>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
>>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
>>> A.shape
(3, 4)
>>> 2*A*B + Identity(3)
I + 2*A*B
"""
is_Atom = True
is_number = False
def __new__(cls, name, n, m, **assumptions):
n, m = sympify(n), sympify(m)
is_commutative = fuzzy_bool(assumptions.get('commutative', False))
assumptions['commutative'] = is_commutative
obj = Expr.__new__(cls)
obj._name = name
obj._shape = (n, m)
obj._assumptions = StdFactKB(assumptions)
return obj
def _hashable_content(self):
return ((self.name, self.shape) +
tuple(sorted((k, v) for k, v in self._assumptions.items()
if v is not None)))
@property
def shape(self):
return self._shape
@property
def name(self):
return self._name
def _eval_subs(self, old, new):
# only do substitutions in shape
shape = Tuple(*self.shape)._subs(old, new)
return MatrixSymbol(self.name, *shape)
def __call__(self, *args):
        raise TypeError(f'{self.__class__} object is not callable')
def _entry(self, i, j):
return MatrixElement(self, i, j)
@property
def free_symbols(self):
return {self}
def doit(self, **hints):
if hints.get('deep', True):
return type(self)(self.name,
*(_.doit(**hints) for _ in self.shape),
**self._assumptions._generator)
else:
return self
class Identity(MatrixExpr):
"""The Matrix Identity I - multiplicative identity
>>> A = MatrixSymbol('A', 3, 5)
>>> I = Identity(3)
>>> I*A
A
"""
is_Identity = True
def __new__(cls, n):
return super().__new__(cls, sympify(n))
@property
def rows(self):
return self.args[0]
@property
def cols(self):
return self.args[0]
@property
def shape(self):
return self.args[0], self.args[0]
def _eval_transpose(self):
return self
def _eval_trace(self):
return self.rows
def _eval_inverse(self):
return self
def conjugate(self):
return self
def _entry(self, i, j):
if i == j:
return Integer(1)
else:
return Integer(0)
def _eval_determinant(self):
return Integer(1)
class ZeroMatrix(MatrixExpr):
"""The Matrix Zero 0 - additive identity
>>> A = MatrixSymbol('A', 3, 5)
>>> Z = ZeroMatrix(3, 5)
>>> A+Z
A
>>> Z*A.T
0
"""
is_ZeroMatrix = True
def __new__(cls, m, n):
return super().__new__(cls, m, n)
@property
def shape(self):
return self.args[0], self.args[1]
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
if other != 1 and not self.is_square:
raise ShapeError(f'Power of non-square matrix {self}')
if other == 0:
return Identity(self.rows)
if other < 1:
raise ValueError('Matrix det == 0; not invertible.')
return self
def _eval_transpose(self):
return ZeroMatrix(self.cols, self.rows)
def _eval_trace(self):
return Integer(0)
def _eval_determinant(self):
return Integer(0)
def conjugate(self):
return self
def _entry(self, i, j):
return Integer(0)
def __bool__(self):
return False
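# A small sketch of the ZeroMatrix power rules above (using the printing
# conventions shown in the docstrings): a square zero matrix to the zeroth
# power collapses to the identity, positive powers stay zero, and negative
# powers are rejected because the matrix is singular.
#
#     >>> Z = ZeroMatrix(3, 3)
#     >>> Z**0
#     I
#     >>> Z**2
#     0
#     >>> Z**-1
#     Traceback (most recent call last):
#     ...
#     ValueError: Matrix det == 0; not invertible.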
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_log import versionutils
from oslo_serialization import jsonutils
import six
from six.moves.urllib import parse
from keystone.common import controller as common_controller
from keystone.common import dependency
from keystone.common import utils
from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.i18n import _, _LE
from keystone import token
from keystone.token import provider
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@dependency.requires('catalog_api', 'resource_api')
class V2TokenDataHelper(object):
"""Creates V2 token data."""
def v3_to_v2_token(self, token_id, v3_token_data):
token_data = {}
# Build v2 token
v3_token = v3_token_data['token']
token = {}
token['id'] = token_id
token['expires'] = v3_token.get('expires_at')
token['issued_at'] = v3_token.get('issued_at')
token['audit_ids'] = v3_token.get('audit_ids')
if 'project' in v3_token:
# v3 token_data does not contain all tenant attributes
tenant = self.resource_api.get_project(
v3_token['project']['id'])
token['tenant'] = common_controller.V2Controller.filter_domain_id(
tenant)
token_data['token'] = token
# Build v2 user
v3_user = v3_token['user']
user = common_controller.V2Controller.v3_to_v2_user(v3_user)
# Set user roles
user['roles'] = []
role_ids = []
for role in v3_token.get('roles', []):
# Filter role id since it's not included in v2 token response
role_ids.append(role.pop('id'))
user['roles'].append(role)
user['roles_links'] = []
token_data['user'] = user
# Get and build v2 service catalog
token_data['serviceCatalog'] = []
if 'tenant' in token:
catalog_ref = self.catalog_api.get_catalog(
user['id'], token['tenant']['id'])
if catalog_ref:
token_data['serviceCatalog'] = self.format_catalog(catalog_ref)
# Build v2 metadata
metadata = {}
metadata['roles'] = role_ids
# Setting is_admin to keep consistency in v2 response
metadata['is_admin'] = 0
token_data['metadata'] = metadata
return {'access': token_data}
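    # Rough shape of the value returned above (field values come from the v3
    # token and the catalog/resource backends; 'tenant' is present only for
    # project-scoped tokens):
    #
    #     {'access': {
    #         'token': {'id': ..., 'expires': ..., 'issued_at': ...,
    #                   'audit_ids': [...], 'tenant': {...}},
    #         'user': {'id': ..., 'name': ..., 'roles': [...],
    #                  'roles_links': []},
    #         'serviceCatalog': [...],
    #         'metadata': {'roles': [...], 'is_admin': 0}}}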
@classmethod
def format_token(cls, token_ref, roles_ref=None, catalog_ref=None,
trust_ref=None):
audit_info = None
user_ref = token_ref['user']
metadata_ref = token_ref['metadata']
if roles_ref is None:
roles_ref = []
expires = token_ref.get('expires', provider.default_expire_time())
if expires is not None:
if not isinstance(expires, six.text_type):
expires = utils.isotime(expires)
token_data = token_ref.get('token_data')
if token_data:
token_audit = token_data.get(
'access', token_data).get('token', {}).get('audit_ids')
audit_info = token_audit
if audit_info is None:
audit_info = provider.audit_info(token_ref.get('parent_audit_id'))
o = {'access': {'token': {'id': token_ref['id'],
'expires': expires,
'issued_at': utils.strtime(),
'audit_ids': audit_info
},
'user': {'id': user_ref['id'],
'name': user_ref['name'],
'username': user_ref['name'],
'roles': roles_ref,
'roles_links': metadata_ref.get('roles_links',
[])
}
}
}
if 'bind' in token_ref:
o['access']['token']['bind'] = token_ref['bind']
if 'tenant' in token_ref and token_ref['tenant']:
token_ref['tenant']['enabled'] = True
o['access']['token']['tenant'] = token_ref['tenant']
if catalog_ref is not None:
o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog(
catalog_ref)
if metadata_ref:
if 'is_admin' in metadata_ref:
o['access']['metadata'] = {'is_admin':
metadata_ref['is_admin']}
else:
o['access']['metadata'] = {'is_admin': 0}
if 'roles' in metadata_ref:
o['access']['metadata']['roles'] = metadata_ref['roles']
if CONF.trust.enabled and trust_ref:
o['access']['trust'] = {'trustee_user_id':
trust_ref['trustee_user_id'],
'id': trust_ref['id'],
'trustor_user_id':
trust_ref['trustor_user_id'],
'impersonation':
trust_ref['impersonation']
}
return o
@classmethod
def format_catalog(cls, catalog_ref):
"""Munge catalogs from internal to output format
Internal catalogs look like::
{$REGION: {
{$SERVICE: {
$key1: $value1,
...
}
}
}
The legacy api wants them to look like::
[{'name': $SERVICE[name],
'type': $SERVICE,
'endpoints': [{
'tenantId': $tenant_id,
...
'region': $REGION,
}],
'endpoints_links': [],
}]
"""
if not catalog_ref:
return []
services = {}
for region, region_ref in catalog_ref.items():
for service, service_ref in region_ref.items():
new_service_ref = services.get(service, {})
new_service_ref['name'] = service_ref.pop('name')
new_service_ref['type'] = service
new_service_ref['endpoints_links'] = []
service_ref['region'] = region
endpoints_ref = new_service_ref.get('endpoints', [])
endpoints_ref.append(service_ref)
new_service_ref['endpoints'] = endpoints_ref
services[service] = new_service_ref
return list(services.values())
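    # A concrete sketch of the munging above, with hypothetical values:
    #
    #     format_catalog({'RegionOne': {
    #         'identity': {'name': 'keystone',
    #                      'publicURL': 'http://localhost:5000/v2.0'}}})
    #
    # returns::
    #
    #     [{'name': 'keystone',
    #       'type': 'identity',
    #       'endpoints': [{'publicURL': 'http://localhost:5000/v2.0',
    #                      'region': 'RegionOne'}],
    #       'endpoints_links': []}]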
@dependency.requires('assignment_api', 'catalog_api', 'federation_api',
'identity_api', 'resource_api', 'role_api', 'trust_api')
class V3TokenDataHelper(object):
"""Token data helper."""
def __init__(self):
# Keep __init__ around to ensure dependency injection works.
super(V3TokenDataHelper, self).__init__()
def _get_filtered_domain(self, domain_id):
domain_ref = self.resource_api.get_domain(domain_id)
return {'id': domain_ref['id'], 'name': domain_ref['name']}
def _get_filtered_project(self, project_id):
project_ref = self.resource_api.get_project(project_id)
filtered_project = {
'id': project_ref['id'],
'name': project_ref['name']}
filtered_project['domain'] = self._get_filtered_domain(
project_ref['domain_id'])
return filtered_project
def _populate_scope(self, token_data, domain_id, project_id):
if 'domain' in token_data or 'project' in token_data:
            # scope already exists, no need to populate it again
return
if domain_id:
token_data['domain'] = self._get_filtered_domain(domain_id)
if project_id:
token_data['project'] = self._get_filtered_project(project_id)
def _get_roles_for_user(self, user_id, domain_id, project_id):
roles = []
if domain_id:
roles = self.assignment_api.get_roles_for_user_and_domain(
user_id, domain_id)
if project_id:
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, project_id)
return [self.role_api.get_role(role_id) for role_id in roles]
def populate_roles_for_groups(self, token_data, group_ids,
project_id=None, domain_id=None,
user_id=None):
"""Populate roles basing on provided groups and project/domain
Used for ephemeral users with dynamically assigned groups.
This method does not return anything, yet it modifies token_data in
place.
:param token_data: a dictionary used for building token response
:group_ids: list of group IDs a user is a member of
:project_id: project ID to scope to
:domain_id: domain ID to scope to
:user_id: user ID
:raises: exception.Unauthorized - when no roles were found for a
(group_ids, project_id) or (group_ids, domain_id) pairs.
"""
def check_roles(roles, user_id, project_id, domain_id):
# User was granted roles so simply exit this function.
if roles:
return
if project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': project_id}
elif domain_id:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': domain_id}
# Since no roles were found a user is not authorized to
# perform any operations. Raise an exception with
# appropriate error message.
raise exception.Unauthorized(msg)
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
check_roles(roles, user_id, project_id, domain_id)
token_data['roles'] = roles
def _populate_user(self, token_data, user_id, trust):
if 'user' in token_data:
# no need to repopulate user if it already exists
return
user_ref = self.identity_api.get_user(user_id)
if CONF.trust.enabled and trust and 'OS-TRUST:trust' not in token_data:
trustor_user_ref = (self.identity_api.get_user(
trust['trustor_user_id']))
try:
self.identity_api.assert_user_enabled(trust['trustor_user_id'])
except AssertionError:
raise exception.Forbidden(_('Trustor is disabled.'))
if trust['impersonation']:
user_ref = trustor_user_ref
token_data['OS-TRUST:trust'] = (
{
'id': trust['id'],
'trustor_user': {'id': trust['trustor_user_id']},
'trustee_user': {'id': trust['trustee_user_id']},
'impersonation': trust['impersonation']
})
filtered_user = {
'id': user_ref['id'],
'name': user_ref['name'],
'domain': self._get_filtered_domain(user_ref['domain_id'])}
token_data['user'] = filtered_user
def _populate_oauth_section(self, token_data, access_token):
if access_token:
access_token_id = access_token['id']
consumer_id = access_token['consumer_id']
token_data['OS-OAUTH1'] = ({'access_token_id': access_token_id,
'consumer_id': consumer_id})
def _populate_roles(self, token_data, user_id, domain_id, project_id,
trust, access_token):
if 'roles' in token_data:
# no need to repopulate roles
return
if access_token:
filtered_roles = []
authed_role_ids = jsonutils.loads(access_token['role_ids'])
all_roles = self.role_api.list_roles()
for role in all_roles:
for authed_role in authed_role_ids:
if authed_role == role['id']:
filtered_roles.append({'id': role['id'],
'name': role['name']})
token_data['roles'] = filtered_roles
return
if CONF.trust.enabled and trust:
token_user_id = trust['trustor_user_id']
token_project_id = trust['project_id']
# trusts do not support domains yet
token_domain_id = None
else:
token_user_id = user_id
token_project_id = project_id
token_domain_id = domain_id
if token_domain_id or token_project_id:
roles = self._get_roles_for_user(token_user_id,
token_domain_id,
token_project_id)
filtered_roles = []
if CONF.trust.enabled and trust:
for trust_role in trust['roles']:
match_roles = [x for x in roles
if x['id'] == trust_role['id']]
if match_roles:
filtered_roles.append(match_roles[0])
else:
raise exception.Forbidden(
_('Trustee has no delegated roles.'))
else:
for role in roles:
filtered_roles.append({'id': role['id'],
'name': role['name']})
# user has no project or domain roles, therefore access denied
if not filtered_roles:
if token_project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': token_project_id}
else:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': token_domain_id}
LOG.debug(msg)
raise exception.Unauthorized(msg)
token_data['roles'] = filtered_roles
def _populate_service_catalog(self, token_data, user_id,
domain_id, project_id, trust):
if 'catalog' in token_data:
# no need to repopulate service catalog
return
if CONF.trust.enabled and trust:
user_id = trust['trustor_user_id']
if project_id or domain_id:
service_catalog = self.catalog_api.get_v3_catalog(
user_id, project_id)
# TODO(ayoung): Enforce Endpoints for trust
token_data['catalog'] = service_catalog
def _populate_service_providers(self, token_data):
if 'service_providers' in token_data:
return
service_providers = self.federation_api.get_enabled_service_providers()
if service_providers:
token_data['service_providers'] = service_providers
def _populate_token_dates(self, token_data, expires=None, trust=None,
issued_at=None):
if not expires:
expires = provider.default_expire_time()
if not isinstance(expires, six.string_types):
expires = utils.isotime(expires, subsecond=True)
token_data['expires_at'] = expires
token_data['issued_at'] = (issued_at or
utils.isotime(subsecond=True))
def _populate_audit_info(self, token_data, audit_info=None):
if audit_info is None or isinstance(audit_info, six.string_types):
token_data['audit_ids'] = provider.audit_info(audit_info)
elif isinstance(audit_info, list):
token_data['audit_ids'] = audit_info
else:
msg = (_('Invalid audit info data type: %(data)s (%(type)s)') %
{'data': audit_info, 'type': type(audit_info)})
LOG.error(msg)
raise exception.UnexpectedError(msg)
def get_token_data(self, user_id, method_names, extras=None,
domain_id=None, project_id=None, expires=None,
trust=None, token=None, include_catalog=True,
bind=None, access_token=None, issued_at=None,
audit_info=None):
if extras is None:
extras = {}
if extras:
versionutils.deprecated(
what='passing token data with "extras"',
as_of=versionutils.deprecated.KILO,
in_favor_of='well-defined APIs')(lambda: None)()
token_data = {'methods': method_names,
'extras': extras}
# We've probably already written these to the token
if token:
for x in ('roles', 'user', 'catalog', 'project', 'domain'):
if x in token:
token_data[x] = token[x]
if CONF.trust.enabled and trust:
if user_id != trust['trustee_user_id']:
raise exception.Forbidden(_('User is not a trustee.'))
if bind:
token_data['bind'] = bind
self._populate_scope(token_data, domain_id, project_id)
self._populate_user(token_data, user_id, trust)
self._populate_roles(token_data, user_id, domain_id, project_id, trust,
access_token)
self._populate_audit_info(token_data, audit_info)
if include_catalog:
self._populate_service_catalog(token_data, user_id, domain_id,
project_id, trust)
self._populate_service_providers(token_data)
self._populate_token_dates(token_data, expires=expires, trust=trust,
issued_at=issued_at)
self._populate_oauth_section(token_data, access_token)
return {'token': token_data}
@dependency.requires('catalog_api', 'identity_api', 'oauth_api',
'resource_api', 'role_api', 'trust_api')
class BaseProvider(provider.Provider):
def __init__(self, *args, **kwargs):
super(BaseProvider, self).__init__(*args, **kwargs)
self.v3_token_data_helper = V3TokenDataHelper()
self.v2_token_data_helper = V2TokenDataHelper()
def get_token_version(self, token_data):
if token_data and isinstance(token_data, dict):
if 'token_version' in token_data:
if token_data['token_version'] in token.provider.VERSIONS:
return token_data['token_version']
# FIXME(morganfainberg): deprecate the following logic in future
# revisions. It is better to just specify the token_version in
# the token_data itself. This way we can support future versions
# that might have the same fields.
if 'access' in token_data:
return token.provider.V2
if 'token' in token_data and 'methods' in token_data['token']:
return token.provider.V3
raise exception.UnsupportedTokenVersionException()
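    # A short sketch of the detection above: a payload shaped like
    # {'access': {...}} is reported as token.provider.V2, a payload shaped
    # like {'token': {'methods': [...], ...}} as token.provider.V3, and
    # anything else raises UnsupportedTokenVersionException.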
def issue_v2_token(self, token_ref, roles_ref=None,
catalog_ref=None):
metadata_ref = token_ref['metadata']
trust_ref = None
if CONF.trust.enabled and metadata_ref and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
token_data = self.v2_token_data_helper.format_token(
token_ref, roles_ref, catalog_ref, trust_ref)
token_id = self._get_token_id(token_data)
token_data['access']['token']['id'] = token_id
return token_id, token_data
def _is_mapped_token(self, auth_context):
return (federation_constants.IDENTITY_PROVIDER in auth_context and
federation_constants.PROTOCOL in auth_context)
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
if auth_context and auth_context.get('bind'):
# NOTE(lbragstad): Check if the token provider being used actually
# supports bind authentication methods before proceeding.
if not self._supports_bind_authentication:
raise exception.NotImplemented(_(
'The configured token provider does not support bind '
'authentication.'))
# for V2, trust is stashed in metadata_ref
if (CONF.trust.enabled and not trust and metadata_ref and
'trust_id' in metadata_ref):
trust = self.trust_api.get_trust(metadata_ref['trust_id'])
token_ref = None
if auth_context and self._is_mapped_token(auth_context):
token_ref = self._handle_mapped_tokens(
auth_context, project_id, domain_id)
access_token = None
if 'oauth1' in method_names:
access_token_id = auth_context['access_token_id']
access_token = self.oauth_api.get_access_token(access_token_id)
token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
auth_context.get('extras') if auth_context else None,
domain_id=domain_id,
project_id=project_id,
expires=expires_at,
trust=trust,
bind=auth_context.get('bind') if auth_context else None,
token=token_ref,
include_catalog=include_catalog,
access_token=access_token,
audit_info=parent_audit_id)
token_id = self._get_token_id(token_data)
return token_id, token_data
def _handle_mapped_tokens(self, auth_context, project_id, domain_id):
def get_federated_domain():
return (CONF.federation.federated_domain_name or
federation_constants.FEDERATED_DOMAIN_KEYWORD)
federated_domain = get_federated_domain()
user_id = auth_context['user_id']
group_ids = auth_context['group_ids']
idp = auth_context[federation_constants.IDENTITY_PROVIDER]
protocol = auth_context[federation_constants.PROTOCOL]
token_data = {
'user': {
'id': user_id,
'name': parse.unquote(user_id),
federation_constants.FEDERATION: {
'groups': [{'id': x} for x in group_ids],
'identity_provider': {'id': idp},
'protocol': {'id': protocol}
},
'domain': {
'id': federated_domain,
'name': federated_domain
}
}
}
if project_id or domain_id:
self.v3_token_data_helper.populate_roles_for_groups(
token_data, group_ids, project_id, domain_id, user_id)
return token_data
def _verify_token_ref(self, token_ref):
"""Verify and return the given token_ref."""
if not token_ref:
raise exception.Unauthorized()
return token_ref
def _assert_is_not_federation_token(self, token_ref):
"""Make sure we aren't using v2 auth on a federation token."""
token_data = token_ref.get('token_data')
if (token_data and self.get_token_version(token_data) ==
token.provider.V3):
if 'OS-FEDERATION' in token_data['token']['user']:
msg = _('Attempting to use OS-FEDERATION token with V2 '
'Identity Service, use V3 Authentication')
raise exception.Unauthorized(msg)
def _assert_default_domain(self, token_ref):
"""Make sure we are operating on default domain only."""
if (token_ref.get('token_data') and
self.get_token_version(token_ref.get('token_data')) ==
token.provider.V3):
# this is a V3 token
msg = _('Non-default domain is not supported')
            # user in a non-default domain is prohibited
if (token_ref['token_data']['token']['user']['domain']['id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
# domain scoping is prohibited
if token_ref['token_data']['token'].get('domain'):
raise exception.Unauthorized(
_('Domain scoped token is not supported'))
# project in non-default domain is prohibited
if token_ref['token_data']['token'].get('project'):
project = token_ref['token_data']['token']['project']
project_domain_id = project['domain']['id']
# scoped to project in non-default domain is prohibited
if project_domain_id != CONF.identity.default_domain_id:
raise exception.Unauthorized(msg)
# if token is scoped to trust, both trustor and trustee must
# be in the default domain. Furthermore, the delegated project
# must also be in the default domain
metadata_ref = token_ref['metadata']
if CONF.trust.enabled and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
trustee_user_ref = self.identity_api.get_user(
trust_ref['trustee_user_id'])
if (trustee_user_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
trustor_user_ref = self.identity_api.get_user(
trust_ref['trustor_user_id'])
if (trustor_user_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
project_ref = self.resource_api.get_project(
trust_ref['project_id'])
if (project_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
def validate_v2_token(self, token_ref):
try:
self._assert_is_not_federation_token(token_ref)
self._assert_default_domain(token_ref)
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
# the cached token, any role, project, or domain name changes
# will not be reflected. One may argue that with PKI tokens,
# we are essentially doing cached token validation anyway.
            # Let's go with the cached token strategy. Since token
# management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
token_data = token_ref.get('token_data')
if (not token_data or
self.get_token_version(token_data) !=
token.provider.V2):
# token is created by old v2 logic
metadata_ref = token_ref['metadata']
roles_ref = []
for role_id in metadata_ref.get('roles', []):
roles_ref.append(self.role_api.get_role(role_id))
# Get a service catalog if possible
# This is needed for on-behalf-of requests
catalog_ref = None
if token_ref.get('tenant'):
catalog_ref = self.catalog_api.get_catalog(
token_ref['user']['id'],
token_ref['tenant']['id'])
trust_ref = None
if CONF.trust.enabled and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(
metadata_ref['trust_id'])
token_data = self.v2_token_data_helper.format_token(
token_ref, roles_ref, catalog_ref, trust_ref)
trust_id = token_data['access'].get('trust', {}).get('id')
if trust_id:
# token trust validation
self.trust_api.get_trust(trust_id)
return token_data
except exception.ValidationError as e:
LOG.exception(_LE('Failed to validate token'))
raise exception.TokenNotFound(e)
def validate_v3_token(self, token_ref):
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
# the cached token, any role, project, or domain name changes
# will not be reflected. One may argue that with PKI tokens,
# we are essentially doing cached token validation anyway.
        # Let's go with the cached token strategy. Since token
# management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
trust_id = token_ref.get('trust_id')
if trust_id:
# token trust validation
self.trust_api.get_trust(trust_id)
token_data = token_ref.get('token_data')
if not token_data or 'token' not in token_data:
# token ref is created by V2 API
project_id = None
project_ref = token_ref.get('tenant')
if project_ref:
project_id = project_ref['id']
issued_at = token_ref['token_data']['access']['token']['issued_at']
audit = token_ref['token_data']['access']['token'].get('audit_ids')
token_data = self.v3_token_data_helper.get_token_data(
token_ref['user']['id'],
['password', 'token'],
project_id=project_id,
bind=token_ref.get('bind'),
expires=token_ref['expires'],
issued_at=issued_at,
audit_info=audit)
return token_data
|
|
import os
import rope.base.change
from rope.base import libutils, utils, exceptions
from rope.contrib import codeassist, generate, autoimport, findit
from ropemode import refactor, decorators, dialog
class RopeMode(object):
def __init__(self, env):
self.project = None
self.old_content = None
self.env = env
self._assist = None
self._prepare_refactorings()
self.autoimport = None
for attrname in dir(self):
attr = getattr(self, attrname)
if not callable(attr):
continue
kind = getattr(attr, 'kind', None)
if kind == 'local':
key = getattr(attr, 'local_key', None)
prefix = getattr(attr, 'prefix', None)
self.env.local_command(attrname, attr, key, prefix)
if kind == 'global':
key = getattr(attr, 'global_key', None)
prefix = getattr(attr, 'prefix', None)
self.env.global_command(attrname, attr, key, prefix)
if kind == 'hook':
hook = getattr(attr, 'hook', None)
self.env.add_hook(attrname, attr, hook)
def _prepare_refactorings(self):
for name in dir(refactor):
if not name.startswith('_') and name != 'Refactoring':
attr = getattr(refactor, name)
if isinstance(attr, type) and \
issubclass(attr, refactor.Refactoring):
refname = self._refactoring_name(attr)
@decorators.local_command(attr.key, 'P', None, refname)
def do_refactor(prefix, self=self, refactoring=attr):
initial_asking = prefix is None
refactoring(self, self.env).show(initial_asking=initial_asking)
setattr(self, refname, do_refactor)
@staticmethod
def _refactoring_name(refactoring):
return refactor.refactoring_name(refactoring)
@decorators.rope_hook('before_save')
def before_save_actions(self):
if self.project is not None:
if not self._is_python_file(self.env.filename()):
return
resource = self._get_resource()
if resource.exists():
self.old_content = resource.read()
else:
self.old_content = ''
@decorators.rope_hook('after_save')
def after_save_actions(self):
if self.project is not None and self.old_content is not None:
libutils.report_change(self.project, self.env.filename(),
self.old_content)
self.old_content = None
@decorators.rope_hook('exit')
def exiting_actions(self):
if self.project is not None:
self.close_project()
@decorators.global_command('o')
def open_project(self, root=None):
if not root:
if self.env.get('auto_project'):
root = self.env.get_cur_dir()
else:
root = self.env.ask_directory('Rope project root folder: ')
if self.project is not None:
self.close_project()
address = rope.base.project._realpath(os.path.join(root,
'.ropeproject'))
if not os.path.exists(address) and not self.env.get('auto_project'):
            if not self.env.y_or_n('Project does not exist in %s, create one?' % root):
self.env.message("Project creation aborted")
return
progress = self.env.create_progress('Opening [%s] project' % root)
self.project = rope.base.project.Project(root)
if self.env.get('enable_autoimport'):
underlined = self.env.get('autoimport_underlineds')
self.autoimport = autoimport.AutoImport(self.project,
underlined=underlined)
progress.done()
@decorators.global_command('k')
def close_project(self):
if self.project is not None:
progress = self.env.create_progress('Closing [%s] project' %
self.project.address)
self.project.close()
self.project = None
progress.done()
@decorators.global_command()
def write_project(self):
if self.project is not None:
progress = self.env.create_progress(
'Writing [%s] project data to disk' % self.project.address)
self.project.sync()
progress.done()
@decorators.global_command('u')
def undo(self):
self._check_project()
change = self.project.history.tobe_undone
if change is None:
self.env.message('Nothing to undo!')
return
if self.env.y_or_n('Undo [%s]? ' % str(change)):
def undo(handle):
for changes in self.project.history.undo(task_handle=handle):
self._reload_buffers(changes, undo=True)
refactor.runtask(self.env, undo, 'Undo refactoring',
interrupts=False)
@decorators.global_command('r')
def redo(self):
self._check_project()
change = self.project.history.tobe_redone
if change is None:
self.env.message('Nothing to redo!')
return
if self.env.y_or_n('Redo [%s]? ' % str(change)):
def redo(handle):
for changes in self.project.history.redo(task_handle=handle):
self._reload_buffers(changes)
refactor.runtask(self.env, redo, 'Redo refactoring',
interrupts=False)
@decorators.local_command('a g', shortcut='C-c g')
def goto_definition(self):
definition = self._base_definition_location()
if definition:
self.env.push_mark()
self._goto_location(definition[0], definition[1])
else:
self.env.message('Cannot find the definition!')
@decorators.local_command()
def pop_mark(self):
self.env.pop_mark()
@decorators.local_command()
def definition_location(self):
definition = self._base_definition_location()
if definition:
return str(definition[0].real_path), definition[1]
return None
def _base_definition_location(self):
self._check_project()
resource, offset = self._get_location()
maxfixes = self.env.get('codeassist_maxfixes')
try:
definition = codeassist.get_definition_location(
self.project, self._get_text(), offset, resource, maxfixes)
except exceptions.BadIdentifierError:
return None
if tuple(definition) != (None, None):
return definition
return None
@decorators.local_command('a d', 'P', 'C-c d')
def show_doc(self, prefix):
self._check_project()
self._base_show_doc(prefix, self._base_get_doc(codeassist.get_doc))
@decorators.local_command()
def get_calltip(self):
self._check_project()
def _get_doc(project, text, offset, *args, **kwds):
try:
offset = text.rindex('(', 0, offset) - 1
except ValueError:
return None
return codeassist.get_calltip(project, text, offset, *args, **kwds)
return self._base_get_doc(_get_doc)
@decorators.local_command('a c', 'P')
def show_calltip(self, prefix):
self._base_show_doc(prefix, self.get_calltip())
def _base_show_doc(self, prefix, docs):
if docs:
self.env.show_doc(docs, prefix)
else:
self.env.message('No docs available!')
@decorators.local_command()
def get_doc(self):
self._check_project()
return self._base_get_doc(codeassist.get_doc)
def _base_get_doc(self, get_doc):
maxfixes = self.env.get('codeassist_maxfixes')
text = self._get_text()
offset = self.env.get_offset()
try:
return get_doc(self.project, text, offset,
self.resource, maxfixes)
except exceptions.BadIdentifierError:
return None
def _get_text(self):
resource = self.resource
if not self.env.is_modified() and resource is not None:
return resource.read()
return self.env.get_text()
def _base_findit(self, do_find, optionals, get_kwds):
self._check_project()
self._save_buffers()
resource, offset = self._get_location()
action, values = dialog.show_dialog(
self._askdata, ['search', 'cancel'], optionals=optionals)
if action == 'search':
kwds = get_kwds(values)
def calculate(handle):
resources = refactor._resources(self.project,
values.get('resources'))
return do_find(self.project, resource, offset,
resources=resources, task_handle=handle, **kwds)
result = refactor.runtask(self.env, calculate, 'Find Occurrences')
locations = [Location(location) for location in result]
self.env.show_occurrences(locations)
@decorators.local_command('a f', shortcut='C-c f')
def find_occurrences(self):
optionals = {
'unsure': dialog.Data('Find uncertain occurrences: ',
default='no', values=['yes', 'no']),
'resources': dialog.Data('Files to search: '),
'in_hierarchy': dialog.Data(
'Rename methods in class hierarchy: ',
default='no', values=['yes', 'no'])}
def get_kwds(values):
return {'unsure': values.get('unsure') == 'yes',
'in_hierarchy': values.get('in_hierarchy') == 'yes'}
self._base_findit(findit.find_occurrences, optionals, get_kwds)
@decorators.local_command('a i')
def find_implementations(self):
optionals = {'resources': dialog.Data('Files to search: ')}
def get_kwds(values):
return {}
self._base_findit(findit.find_implementations, optionals, get_kwds)
@decorators.local_command('a /', 'P', 'M-/')
def code_assist(self, prefix):
_CodeAssist(self, self.env).code_assist(prefix)
@decorators.local_command('a ?', 'P', 'M-?')
def lucky_assist(self, prefix):
_CodeAssist(self, self.env).lucky_assist(prefix)
@decorators.local_command(prefix='P')
def omni_complete(self, prefix):
self._assist.omni_complete(prefix)
def _find_start(self):
self._assist = _CodeAssist(self, self.env)
start = (self.env.cursor[1] - self.env.get_offset()
+ self._assist.starting_offset)
self.env._command('let g:pymode_offset = %s' % start)
@decorators.local_command('a')
def auto_import(self):
_CodeAssist(self, self.env).auto_import()
@decorators.local_command()
def completions(self):
return _CodeAssist(self, self.env).completions()
@decorators.local_command()
def extended_completions(self):
return _CodeAssist(self, self.env).extended_completions()
def _check_autoimport(self):
self._check_project()
if self.autoimport is None:
self.env.message('autoimport is disabled; '
'see `enable_autoimport\' variable')
return False
return True
@decorators.global_command('g')
def generate_autoimport_cache(self):
if not self._check_autoimport():
return
modules = self.env.get('autoimport_modules')
        modules = [m if isinstance(m, basestring) else m.value() for m in modules]
def generate(handle):
self.autoimport.generate_cache(task_handle=handle)
self.autoimport.generate_modules_cache(modules, task_handle=handle)
refactor.runtask(self.env, generate, 'Generate autoimport cache')
self.write_project()
@decorators.global_command('f', 'P')
def find_file(self, prefix):
file = self._base_find_file(prefix)
if file is not None:
self.env.find_file(file.real_path)
@decorators.global_command('4 f', 'P')
def find_file_other_window(self, prefix):
file = self._base_find_file(prefix)
if file is not None:
self.env.find_file(file.real_path, other=True)
def _base_find_file(self, prefix):
self._check_project()
if prefix:
files = self.project.pycore.get_python_files()
else:
files = self.project.get_files()
return self._ask_file(files)
def _ask_file(self, files):
names = []
for file in files:
names.append('<'.join(reversed(file.path.split('/'))))
result = self.env.ask_values('Rope Find File: ', names)
if result is not None:
path = '/'.join(reversed(result.split('<')))
file = self.project.get_file(path)
return file
self.env.message('No file selected')
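    # A small sketch of the completion format used above: paths are offered
    # deepest component first, joined with '<', so 'keystone/token/provider.py'
    # shows up as 'provider.py<token<keystone' and is mapped back to the
    # original path once selected.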
@decorators.local_command('a j')
def jump_to_global(self):
if not self._check_autoimport():
return
all_names = list(self.autoimport.get_all_names())
name = self.env.ask_values('Global name: ', all_names)
result = dict(self.autoimport.get_name_locations(name))
if len(result) == 1:
resource = list(result.keys())[0]
else:
resource = self._ask_file(result.keys())
if resource:
self._goto_location(resource, result[resource])
@decorators.global_command('c')
def project_config(self):
self._check_project()
if self.project.ropefolder is not None:
config = self.project.ropefolder.get_child('config.py')
self.env.find_file(config.real_path)
else:
self.env.message('No rope project folder found')
@decorators.global_command('n m')
def create_module(self):
def callback(sourcefolder, name):
return generate.create_module(self.project, name, sourcefolder)
self._create('module', callback)
@decorators.global_command('n p')
def create_package(self):
def callback(sourcefolder, name):
folder = generate.create_package(self.project, name, sourcefolder)
return folder.get_child('__init__.py')
self._create('package', callback)
@decorators.global_command('n f')
def create_file(self):
def callback(parent, name):
return parent.create_file(name)
self._create('file', callback, 'parent')
@decorators.global_command('n d')
def create_directory(self):
def callback(parent, name):
parent.create_folder(name)
self._create('directory', callback, 'parent')
@decorators.local_command()
def analyze_module(self):
"""Perform static object analysis on this module"""
self._check_project()
self.project.pycore.analyze_module(self.resource)
@decorators.global_command()
def analyze_modules(self):
"""Perform static object analysis on all project modules"""
self._check_project()
def _analyze_modules(handle):
libutils.analyze_modules(self.project, task_handle=handle)
refactor.runtask(self.env, _analyze_modules, 'Analyze project modules')
@decorators.local_command()
def run_module(self):
"""Run and perform dynamic object analysis on this module"""
self._check_project()
process = self.project.pycore.run_module(self.resource)
try:
process.wait_process()
finally:
process.kill_process()
def _create(self, name, callback, parentname='source'):
self._check_project()
confs = {'name': dialog.Data(name.title() + ' name: ')}
parentname = parentname + 'folder'
optionals = {parentname: dialog.Data(
parentname.title() + ' Folder: ',
default=self.project.address, kind='directory')}
action, values = dialog.show_dialog(
self._askdata, ['perform', 'cancel'], confs, optionals)
if action == 'perform':
parent = libutils.path_to_resource(
self.project, values.get(parentname, self.project.address))
resource = callback(parent, values['name'])
if resource:
self.env.find_file(resource.real_path)
def _goto_location(self, resource, lineno):
if resource:
self.env.find_file(str(resource.real_path),
other=self.env.get('goto_def_newwin'))
if lineno:
self.env.goto_line(lineno)
def _get_location(self):
offset = self.env.get_offset()
return self.resource, offset
def _get_resource(self, filename=None):
if filename is None:
filename = self.env.filename()
if filename is None or self.project is None:
return
resource = libutils.path_to_resource(self.project, filename, 'file')
return resource
@property
def resource(self):
"""the current resource
Returns `None` when file does not exist.
"""
resource = self._get_resource()
if resource and resource.exists():
return resource
@decorators.global_command()
def get_project_root(self):
if self.project is not None:
return self.project.root.real_path
else:
return None
def _check_project(self):
if self.project is None:
if self.env.get('guess_project'):
self.open_project(self._guess_project())
else:
self.open_project()
else:
self.project.validate(self.project.root)
def _guess_project(self):
cwd = self.env.filename()
if cwd is not None:
while True:
ropefolder = os.path.join(cwd, '.ropeproject')
if os.path.exists(ropefolder) and os.path.isdir(ropefolder):
return cwd
newcwd = os.path.dirname(cwd)
if newcwd == cwd:
break
cwd = newcwd
def _reload_buffers(self, changes, undo=False):
self._reload_buffers_for_changes(
changes.get_changed_resources(),
self._get_moved_resources(changes, undo))
def _reload_buffers_for_changes(self, changed, moved={}):
filenames = [resource.real_path for resource in changed]
moved = dict([(resource.real_path, moved[resource].real_path)
for resource in moved])
self.env.reload_files(filenames, moved)
def _get_moved_resources(self, changes, undo=False):
result = {}
if isinstance(changes, rope.base.change.ChangeSet):
for change in changes.changes:
result.update(self._get_moved_resources(change))
if isinstance(changes, rope.base.change.MoveResource):
result[changes.resource] = changes.new_resource
if undo:
return dict([(value, key) for key, value in result.items()])
return result
def _save_buffers(self, only_current=False):
if only_current:
filenames = [self.env.filename()]
else:
filenames = self.env.filenames()
pythons = []
for filename in filenames:
if self._is_python_file(filename):
pythons.append(filename)
self.env.save_files(pythons)
def _is_python_file(self, path):
resource = self._get_resource(path)
return (resource is not None and
resource.project == self.project and
self.project.pycore.is_python_file(resource))
def _askdata(self, data, starting=None):
ask_func = self.env.ask
ask_args = {'prompt': data.prompt, 'starting': starting,
'default': data.default}
if data.values:
ask_func = self.env.ask_values
ask_args['values'] = data.values
elif data.kind == 'directory':
ask_func = self.env.ask_directory
return ask_func(**ask_args)
class Location(object):
def __init__(self, location):
self.location = location
self.filename = location.resource.real_path
self.offset = location.offset
self.note = ''
if location.unsure:
self.note = '?'
@property
def lineno(self):
if hasattr(self.location, 'lineno'):
return self.location.lineno
return self.location.resource.read().count('\n', 0, self.offset) + 1
class _CodeAssist(object):
def __init__(self, interface, env):
self.interface = interface
self.env = env
def code_assist(self, prefix):
proposals = self._calculate_proposals()
if prefix is not None:
arg = self.env.prefix_value(prefix)
if arg == 0:
arg = len(proposals)
common_start = self._calculate_prefix(proposals[:arg])
self.env.insert(common_start[self.offset - self.starting_offset:])
self._starting = common_start
self._offset = self.starting_offset + len(common_start)
prompt = 'Completion for %s: ' % self.expression
proposals = map(self.env._completion_data, proposals)
result = self.env.ask_completion(prompt, proposals, self.starting)
if result is not None:
self._apply_assist(result)
def omni_complete(self, prefix):
proposals = self._calculate_proposals()
proposals = self.env._update_proposals(proposals)
command = u'let g:pythoncomplete_completions = [%s]' % proposals
self.env._command(command, encode=True)
def lucky_assist(self, prefix):
proposals = self._calculate_proposals()
selected = 0
if prefix is not None:
selected = self.env.prefix_value(prefix)
if 0 <= selected < len(proposals):
result = self.env._completion_text(proposals[selected])
else:
self.env.message('Not enough proposals!')
return
self._apply_assist(result)
def auto_import(self):
if not self.interface._check_autoimport():
return
if not self.autoimport.names and self.env.get('autoimport_generate'):
self.interface.generate_autoimport_cache()
name = self.env.current_word()
modules = self.autoimport.get_modules(name)
if modules:
if len(modules) == 1:
module = modules[0]
else:
module = self.env.ask_values(
'Which module to import: ', modules)
self._insert_import(name, module)
else:
self.env.message('Global name %s not found!' % name)
def completions(self):
proposals = self._calculate_proposals()
prefix = self.offset - self.starting_offset
return [self.env._completion_text(proposal)[prefix:]
for proposal in proposals]
def extended_completions(self):
proposals = self._calculate_proposals()
prefix = self.offset - self.starting_offset
return [[proposal.name[prefix:], proposal.get_doc(),
proposal.type] for proposal in proposals]
def _apply_assist(self, assist):
if ' : ' in assist:
name, module = assist.rsplit(' : ', 1)
self.env.delete(self.starting_offset + 1, self.offset + 1)
self.env.insert(name)
self._insert_import(name, module)
else:
self.env.delete(self.starting_offset + 1, self.offset + 1)
self.env.insert(assist)
def _calculate_proposals(self):
self.interface._check_project()
resource = self.interface.resource
maxfixes = self.env.get('codeassist_maxfixes')
proposals = codeassist.code_assist(
self.interface.project, self.source, self.offset,
resource, maxfixes=maxfixes)
if self.env.get('sorted_completions', True):
proposals = codeassist.sorted_proposals(proposals)
if self.autoimport is not None:
if self.starting.strip() and '.' not in self.expression:
import_assists = self.autoimport.import_assist(self.starting)
for assist in import_assists:
p = codeassist.CompletionProposal(' : '.join(assist),
'autoimport')
proposals.append(p)
return proposals
def _insert_import(self, name, module):
lineno = self.autoimport.find_insertion_line(self.source)
line = 'from %s import %s' % (module, name)
self.env.insert_line(line, lineno)
def _calculate_prefix(self, proposals):
if not proposals:
return ''
prefix = self.env._completion_text(proposals[0])
for proposal in proposals:
common = 0
name = self.env._completion_text(proposal)
for c1, c2 in zip(prefix, name):
if c1 != c2 or ' ' in (c1, c2):
break
common += 1
prefix = prefix[:common]
return prefix
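    # For example (an illustrative sketch): proposals rendering as 'filter',
    # 'filtering' and 'filters' share the common prefix 'filter', which is
    # what gets inserted; a space in either string stops the match early.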
@property
@utils.cacheit
def offset(self):
return self.env.get_offset()
@property
@utils.cacheit
def source(self):
return self.interface._get_text()
@property
@utils.cacheit
def starting_offset(self):
return codeassist.starting_offset(self.source, self.offset)
@property
@utils.cacheit
def starting(self):
return self.source[self.starting_offset:self.offset]
@property
@utils.cacheit
def expression(self):
return codeassist.starting_expression(self.source, self.offset)
@property
def autoimport(self):
return self.interface.autoimport
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
To run these tests against a live database:
1. Modify the file `keystone/tests/backend_sql.conf` to use the connection for
your live database
2. Set up a blank, live database.
3. Run the tests using
./run_tests.sh -N test_sql_upgrade
WARNING::
Your database will be wiped.
Do not do this against a database with valuable data, as
all data will be lost.
"""
import copy
import json
import uuid
from migrate.versioning import api as versioning_api
import sqlalchemy
from keystone.common import sql
from keystone.common.sql import migration
from keystone.common import utils
from keystone import config
from keystone import credential
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlMigrateBase(tests.TestCase):
def initialize_sql(self):
self.metadata = sqlalchemy.MetaData()
self.metadata.bind = self.engine
_config_file_list = [tests.etcdir('keystone.conf.sample'),
tests.testsdir('test_overrides.conf'),
tests.testsdir('backend_sql.conf')]
    # Override this to specify the complete list of configuration files.
def config_files(self):
return self._config_file_list
def repo_package(self):
return None
def setUp(self):
super(SqlMigrateBase, self).setUp()
self.config(self.config_files())
self.base = sql.Base()
# create and share a single sqlalchemy engine for testing
self.engine = self.base.get_engine(allow_global_engine=False)
sql.core.set_global_engine(self.engine)
self.Session = self.base.get_sessionmaker(engine=self.engine,
autocommit=False)
self.initialize_sql()
self.repo_path = migration.find_migrate_repo(self.repo_package())
self.schema = versioning_api.ControlledSchema.create(
self.engine,
self.repo_path, 0)
# auto-detect the highest available schema version in the migrate_repo
self.max_version = self.schema.repository.version().version
def tearDown(self):
sqlalchemy.orm.session.Session.close_all()
table = sqlalchemy.Table("migrate_version", self.metadata,
autoload=True)
self.downgrade(0)
table.drop(self.engine, checkfirst=True)
sql.core.set_global_engine(None)
super(SqlMigrateBase, self).tearDown()
def select_table(self, name):
table = sqlalchemy.Table(name,
self.metadata,
autoload=True)
s = sqlalchemy.select([table])
return s
def assertTableExists(self, table_name):
try:
self.select_table(table_name)
except sqlalchemy.exc.NoSuchTableError:
raise AssertionError('Table "%s" does not exist' % table_name)
def assertTableDoesNotExist(self, table_name):
"""Asserts that a given table exists cannot be selected by name."""
# Switch to a different metadata otherwise you might still
# detect renamed or dropped tables
try:
temp_metadata = sqlalchemy.MetaData()
temp_metadata.bind = self.engine
sqlalchemy.Table(table_name, temp_metadata, autoload=True)
except sqlalchemy.exc.NoSuchTableError:
pass
else:
raise AssertionError('Table "%s" already exists' % table_name)
def upgrade(self, *args, **kwargs):
self._migrate(*args, **kwargs)
def downgrade(self, *args, **kwargs):
self._migrate(*args, downgrade=True, **kwargs)
def _migrate(self, version, repository=None, downgrade=False,
current_schema=None):
repository = repository or self.repo_path
err = ''
version = versioning_api._migrate_version(self.schema,
version,
not downgrade,
err)
if not current_schema:
current_schema = self.schema
changeset = current_schema.changeset(version)
for ver, change in changeset:
self.schema.runchange(ver, change, changeset.step)
self.assertEqual(self.schema.version, version)
def assertTableColumns(self, table_name, expected_cols):
"""Asserts that the table contains the expected set of columns."""
self.initialize_sql()
table = self.select_table(table_name)
actual_cols = [col.name for col in table.columns]
self.assertEqual(expected_cols, actual_cols, '%s table' % table_name)
class SqlUpgradeTests(SqlMigrateBase):
def test_blank_db_to_start(self):
self.assertTableDoesNotExist('user')
def test_start_version_0(self):
version = migration.db_version()
self.assertEqual(version, 0, "DB is at version 0")
def test_two_steps_forward_one_step_back(self):
"""You should be able to cleanly undo and re-apply all upgrades.
Upgrades are run in the following order::
0 -> 1 -> 0 -> 1 -> 2 -> 1 -> 2 -> 3 -> 2 -> 3 ...
^---------^ ^---------^ ^---------^
"""
for x in range(1, self.max_version + 1):
self.upgrade(x)
self.downgrade(x - 1)
self.upgrade(x)
def test_upgrade_add_initial_tables(self):
self.upgrade(1)
self.assertTableColumns("user", ["id", "name", "extra"])
self.assertTableColumns("tenant", ["id", "name", "extra"])
self.assertTableColumns("role", ["id", "name"])
self.assertTableColumns("user_tenant_membership",
["user_id", "tenant_id"])
self.assertTableColumns("metadata", ["user_id", "tenant_id", "data"])
self.populate_user_table()
def test_upgrade_add_policy(self):
self.upgrade(5)
self.assertTableDoesNotExist('policy')
self.upgrade(6)
self.assertTableExists('policy')
self.assertTableColumns('policy', ['id', 'type', 'blob', 'extra'])
def test_upgrade_normalize_identity(self):
self.upgrade(8)
self.populate_user_table()
self.populate_tenant_table()
self.upgrade(10)
self.assertTableColumns("user",
["id", "name", "extra",
"password", "enabled"])
self.assertTableColumns("tenant",
["id", "name", "extra", "description",
"enabled"])
self.assertTableColumns("role", ["id", "name", "extra"])
self.assertTableColumns("user_tenant_membership",
["user_id", "tenant_id"])
self.assertTableColumns("metadata", ["user_id", "tenant_id", "data"])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='foo'").one()
self.assertTrue(a_user.enabled)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertFalse(a_user.enabled)
tenant_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
a_tenant = session.query(tenant_table).filter("id='baz'").one()
self.assertEqual(a_tenant.description, 'description')
session.commit()
session.close()
def test_upgrade_user_tenant_membership_to_metadata(self):
self.upgrade(16)
self.assertTableColumns(
'user_project_membership',
['user_id', 'tenant_id'])
user = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': 'default',
'extra': json.dumps({}),
}
project = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': 'default',
'extra': json.dumps({}),
}
metadata = {
'user_id': user['id'],
'tenant_id': project['id'],
}
session = self.Session()
self.insert_dict(session, 'user', user)
self.insert_dict(session, 'project', project)
self.insert_dict(session, 'user_project_membership', metadata)
self.upgrade(17)
user_project_metadata_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
result = session.query(user_project_metadata_table).one()
self.assertEqual(result.user_id, user['id'])
self.assertEqual(result.project_id, project['id'])
self.assertEqual(
json.loads(result.data),
{'roles': [CONF.member_role_id]})
def test_normalized_enabled_states(self):
self.upgrade(8)
users = {
'bool_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': True})},
'bool_disabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': False})},
'str_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 'True'})},
'str_disabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 'False'})},
'int_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 1})},
'int_disabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 0})},
'null_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': None})},
'unset_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({})}}
session = self.Session()
for user in users.values():
self.insert_dict(session, 'user', user)
session.commit()
self.upgrade(10)
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
q = session.query(user_table, 'enabled')
user = q.filter_by(id=users['bool_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['bool_disabled_user']['id']).one()
self.assertFalse(user.enabled)
user = q.filter_by(id=users['str_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['str_disabled_user']['id']).one()
self.assertFalse(user.enabled)
user = q.filter_by(id=users['int_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['int_disabled_user']['id']).one()
self.assertFalse(user.enabled)
user = q.filter_by(id=users['null_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['unset_enabled_user']['id']).one()
self.assertTrue(user.enabled)
def test_downgrade_10_to_8(self):
self.upgrade(10)
self.populate_user_table(with_pass_enab=True)
self.populate_tenant_table(with_desc_enab=True)
self.downgrade(8)
self.assertTableColumns('user',
['id', 'name', 'extra'])
self.assertTableColumns('tenant',
['id', 'name', 'extra'])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
tenant_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
a_tenant = session.query(tenant_table).filter("id='baz'").one()
self.assertEqual(a_tenant.name, default_fixtures.TENANTS[1]['name'])
session.commit()
session.close()
def test_upgrade_endpoints(self):
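# Migration 13 splits each legacy endpoint row into three rows, one per
# interface (public/internal/admin), copying the matching URL out of the
# old 'extra' blob and recording the original id in legacy_endpoint_id.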
self.upgrade(10)
service_extra = {
'name': uuid.uuid4().hex,
}
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'extra': json.dumps(service_extra),
}
endpoint_extra = {
'publicurl': uuid.uuid4().hex,
'internalurl': uuid.uuid4().hex,
'adminurl': uuid.uuid4().hex,
}
endpoint = {
'id': uuid.uuid4().hex,
'region': uuid.uuid4().hex,
'service_id': service['id'],
'extra': json.dumps(endpoint_extra),
}
session = self.Session()
self.insert_dict(session, 'service', service)
self.insert_dict(session, 'endpoint', endpoint)
session.commit()
session.close()
self.upgrade(13)
self.assertTableColumns(
'service',
['id', 'type', 'extra'])
self.assertTableColumns(
'endpoint',
['id', 'legacy_endpoint_id', 'interface', 'region', 'service_id',
'url', 'extra'])
endpoint_table = sqlalchemy.Table(
'endpoint', self.metadata, autoload=True)
session = self.Session()
self.assertEqual(session.query(endpoint_table).count(), 3)
for interface in ['public', 'internal', 'admin']:
q = session.query(endpoint_table)
q = q.filter_by(legacy_endpoint_id=endpoint['id'])
q = q.filter_by(interface=interface)
ref = q.one()
self.assertNotEqual(ref.id, endpoint['id'])
self.assertEqual(ref.legacy_endpoint_id, endpoint['id'])
self.assertEqual(ref.interface, interface)
self.assertEqual(ref.region, endpoint['region'])
self.assertEqual(ref.service_id, endpoint['service_id'])
self.assertEqual(ref.url, endpoint_extra['%surl' % interface])
self.assertEqual(ref.extra, '{}')
session.commit()
session.close()
def assertTenantTables(self):
self.assertTableExists('tenant')
self.assertTableExists('user_tenant_membership')
self.assertTableDoesNotExist('project')
self.assertTableDoesNotExist('user_project_membership')
def assertProjectTables(self):
self.assertTableExists('project')
self.assertTableExists('user_project_membership')
self.assertTableDoesNotExist('tenant')
self.assertTableDoesNotExist('user_tenant_membership')
def test_upgrade_tenant_to_project(self):
self.upgrade(14)
self.assertTenantTables()
self.upgrade(15)
self.assertProjectTables()
def test_downgrade_project_to_tenant(self):
# TODO(henry-nash): Debug why we need to re-load the tenant
# or user_tenant_membership ahead of upgrading to project
# in order for the assertProjectTables to work on sqlite
# (MySQL is fine without it)
self.upgrade(14)
self.assertTenantTables()
self.upgrade(15)
self.assertProjectTables()
self.downgrade(14)
self.assertTenantTables()
def test_upgrade_add_group_tables(self):
self.upgrade(13)
self.upgrade(14)
self.assertTableExists('group')
self.assertTableExists('group_project_metadata')
self.assertTableExists('group_domain_metadata')
self.assertTableExists('user_group_membership')
def test_upgrade_14_to_16(self):
self.upgrade(14)
self.populate_user_table(with_pass_enab=True)
self.populate_tenant_table(with_desc_enab=True)
self.upgrade(16)
self.assertTableColumns("user",
["id", "name", "extra",
"password", "enabled", "domain_id"])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='foo'").one()
self.assertTrue(a_user.enabled)
self.assertEqual(a_user.domain_id, DEFAULT_DOMAIN_ID)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
self.assertEqual(a_user.domain_id, DEFAULT_DOMAIN_ID)
project_table = sqlalchemy.Table("project",
self.metadata,
autoload=True)
a_project = session.query(project_table).filter("id='baz'").one()
self.assertEqual(a_project.description,
default_fixtures.TENANTS[1]['description'])
self.assertEqual(a_project.domain_id, DEFAULT_DOMAIN_ID)
session.commit()
session.close()
self.check_uniqueness_constraints()
def test_downgrade_16_to_14(self):
self.upgrade(16)
self.populate_user_table(with_pass_enab_domain=True)
self.populate_tenant_table(with_desc_enab_domain=True)
self.downgrade(14)
self.assertTableColumns("user",
["id", "name", "extra",
"password", "enabled"])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='foo'").one()
self.assertTrue(a_user.enabled)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
tenant_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
a_tenant = session.query(tenant_table).filter("id='baz'").one()
self.assertEqual(a_tenant.description,
default_fixtures.TENANTS[1]['description'])
session.commit()
session.close()
def test_downgrade_remove_group_tables(self):
self.upgrade(14)
self.downgrade(13)
self.assertTableDoesNotExist('group')
self.assertTableDoesNotExist('group_project_metadata')
self.assertTableDoesNotExist('group_domain_metadata')
self.assertTableDoesNotExist('user_group_membership')
def test_downgrade_endpoints(self):
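# The reverse of test_upgrade_endpoints: downgrading collapses the three
# per-interface rows sharing a legacy_endpoint_id back into a single row
# whose id is the legacy id and whose URLs are folded back into 'extra'.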
self.upgrade(13)
service_extra = {
'name': uuid.uuid4().hex,
}
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'extra': json.dumps(service_extra),
}
common_endpoint_attrs = {
'legacy_endpoint_id': uuid.uuid4().hex,
'region': uuid.uuid4().hex,
'service_id': service['id'],
'extra': json.dumps({}),
}
endpoints = {
'public': {
'id': uuid.uuid4().hex,
'interface': 'public',
'url': uuid.uuid4().hex,
},
'internal': {
'id': uuid.uuid4().hex,
'interface': 'internal',
'url': uuid.uuid4().hex,
},
'admin': {
'id': uuid.uuid4().hex,
'interface': 'admin',
'url': uuid.uuid4().hex,
},
}
session = self.Session()
self.insert_dict(session, 'service', service)
for endpoint in endpoints.values():
endpoint.update(common_endpoint_attrs)
self.insert_dict(session, 'endpoint', endpoint)
session.commit()
session.close()
self.downgrade(9)
self.assertTableColumns(
'service',
['id', 'type', 'extra'])
self.assertTableColumns(
'endpoint',
['id', 'region', 'service_id', 'extra'])
endpoint_table = sqlalchemy.Table(
'endpoint', self.metadata, autoload=True)
session = self.Session()
self.assertEqual(session.query(endpoint_table).count(), 1)
q = session.query(endpoint_table)
q = q.filter_by(id=common_endpoint_attrs['legacy_endpoint_id'])
ref = q.one()
self.assertEqual(ref.id, common_endpoint_attrs['legacy_endpoint_id'])
self.assertEqual(ref.region, endpoint['region'])
self.assertEqual(ref.service_id, endpoint['service_id'])
extra = json.loads(ref.extra)
for interface in ['public', 'internal', 'admin']:
expected_url = endpoints[interface]['url']
self.assertEqual(extra['%surl' % interface], expected_url)
session.commit()
session.close()
def insert_dict(self, session, table_name, d):
"""Naively inserts key-value pairs into a table, given a dictionary."""
this_table = sqlalchemy.Table(table_name, self.metadata, autoload=True)
insert = this_table.insert()
insert.execute(d)
session.commit()
def test_upgrade_31_to_32(self):
self.upgrade(32)
user_table = self.select_table("user")
self.assertEqual(user_table.c.name.type.length, 255)
def test_upgrade_34_to_35(self):
self.upgrade(35)
quota_table = self.select_table("quota")
self.assertEqual(type(quota_table.c.resource.type),
sqlalchemy.types.VARCHAR)
def test_downgrade_32_to_31(self):
self.upgrade(32)
session = self.Session()
# NOTE(aloga): we need a different metadata object
user_table = sqlalchemy.Table('user',
sqlalchemy.MetaData(),
autoload=True,
autoload_with=self.engine)
user_id = uuid.uuid4().hex
ins = user_table.insert().values(
{'id': user_id,
'name': 'a' * 255,
'password': uuid.uuid4().hex,
'enabled': True,
'domain_id': DEFAULT_DOMAIN_ID,
'extra': '{}'})
session.execute(ins)
session.commit()
self.downgrade(31)
# Check that username has been truncated
q = session.query(user_table.c.name)
q = q.filter(user_table.c.id == user_id)
r = q.one()
user_name = r[0]
self.assertEqual(len(user_name), 64)
user_table = self.select_table("user")
self.assertEqual(user_table.c.name.type.length, 64)
def test_downgrade_to_0(self):
self.upgrade(self.max_version)
if self.engine.name == 'mysql':
self._mysql_check_all_tables_innodb()
self.downgrade(0)
for table_name in ["user", "token", "role", "user_tenant_membership",
"metadata"]:
self.assertTableDoesNotExist(table_name)
def test_upgrade_add_domain_tables(self):
self.upgrade(6)
self.assertTableDoesNotExist('credential')
self.assertTableDoesNotExist('domain')
self.assertTableDoesNotExist('user_domain_metadata')
self.upgrade(7)
self.assertTableExists('credential')
self.assertTableColumns('credential', ['id', 'user_id', 'project_id',
'blob', 'type', 'extra'])
self.assertTableExists('domain')
self.assertTableColumns('domain', ['id', 'name', 'enabled', 'extra'])
self.assertTableExists('user_domain_metadata')
self.assertTableColumns('user_domain_metadata',
['user_id', 'domain_id', 'data'])
def test_metadata_table_migration(self):
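# Summary of what is exercised here: migration 17 introduces
# user_project_metadata (and the default member role) but, as noted
# below, does not copy the legacy grants; migration 20 then folds each
# role list from the old 'metadata' table into user_project_metadata,
# merging with any rows already present, after which the old table is
# gone.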
# Scaffolding
session = self.Session()
self.upgrade(16)
domain_table = sqlalchemy.Table('domain', self.metadata, autoload=True)
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
project_table = sqlalchemy.Table(
'project', self.metadata, autoload=True)
metadata_table = sqlalchemy.Table(
'metadata', self.metadata, autoload=True)
# Create a Domain
domain = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(domain))
# Create a Project
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': "{}"}
session.execute(project_table.insert().values(project))
# Create another Project
project2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': "{}"}
session.execute(project_table.insert().values(project2))
# Create a User
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
session.execute(user_table.insert().values(user))
# Create a Role
role = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role))
# And another role
role2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role2))
# Grant Role to User
role_grant = {'user_id': user['id'],
'tenant_id': project['id'],
'data': json.dumps({"roles": [role['id']]})}
session.execute(metadata_table.insert().values(role_grant))
role_grant = {'user_id': user['id'],
'tenant_id': project2['id'],
'data': json.dumps({"roles": [role2['id']]})}
session.execute(metadata_table.insert().values(role_grant))
# Create another user to test the case where member_role_id is already
# assigned.
user2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
session.execute(user_table.insert().values(user2))
# Grant CONF.member_role_id to User2
role_grant = {'user_id': user2['id'],
'tenant_id': project['id'],
'data': json.dumps({"roles": [CONF.member_role_id]})}
session.execute(metadata_table.insert().values(role_grant))
session.commit()
self.upgrade(17)
user_project_metadata_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
s = sqlalchemy.select([metadata_table.c.data]).where(
(metadata_table.c.user_id == user['id']) &
(metadata_table.c.tenant_id == project['id']))
r = session.execute(s)
test_project1 = json.loads(r.fetchone()['data'])
self.assertEqual(len(test_project1['roles']), 1)
self.assertIn(role['id'], test_project1['roles'])
# Test user in project2 has role2
s = sqlalchemy.select([metadata_table.c.data]).where(
(metadata_table.c.user_id == user['id']) &
(metadata_table.c.tenant_id == project2['id']))
r = session.execute(s)
test_project2 = json.loads(r.fetchone()['data'])
self.assertEqual(len(test_project2['roles']), 1)
self.assertIn(role2['id'], test_project2['roles'])
# Check whether the user's roles on the project were copied into
# user_project_metadata. Migration 17 does not properly migrate this
# data, so this should be None.
s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
(user_project_metadata_table.c.user_id == user['id']) &
(user_project_metadata_table.c.project_id == project['id']))
r = session.execute(s)
self.assertIsNone(r.fetchone())
# Create a conflicting user-project in user_project_metadata with
# a different role
data = json.dumps({"roles": [role2['id']]})
role_grant = {'user_id': user['id'],
'project_id': project['id'],
'data': data}
cmd = user_project_metadata_table.insert().values(role_grant)
self.engine.execute(cmd)
# Create another conflicting user-project for User2
data = json.dumps({"roles": [role2['id']]})
role_grant = {'user_id': user2['id'],
'project_id': project['id'],
'data': data}
cmd = user_project_metadata_table.insert().values(role_grant)
self.engine.execute(cmd)
# End Scaffolding
session.commit()
# Migrate to 20
self.upgrade(20)
# The user-project pairs should have all roles from the previous
# metadata table in addition to any roles currently in
# user_project_metadata
s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
(user_project_metadata_table.c.user_id == user['id']) &
(user_project_metadata_table.c.project_id == project['id']))
r = session.execute(s)
role_ids = json.loads(r.fetchone()['data'])['roles']
self.assertEqual(len(role_ids), 3)
self.assertIn(CONF.member_role_id, role_ids)
self.assertIn(role['id'], role_ids)
self.assertIn(role2['id'], role_ids)
# pairs that only existed in the old metadata table should now be in
# user_project_metadata
s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
(user_project_metadata_table.c.user_id == user['id']) &
(user_project_metadata_table.c.project_id == project2['id']))
r = session.execute(s)
role_ids = json.loads(r.fetchone()['data'])['roles']
self.assertEqual(len(role_ids), 2)
self.assertIn(CONF.member_role_id, role_ids)
self.assertIn(role2['id'], role_ids)
self.assertTableDoesNotExist('metadata')
def test_upgrade_default_roles(self):
def count_member_roles():
session = self.Session()
query_string = ("select count(*) as c from role "
"where name='%s'" % config.CONF.member_role_name)
role_count = session.execute(query_string).fetchone()['c']
session.close()
return role_count
self.upgrade(16)
self.assertEqual(0, count_member_roles())
self.upgrade(17)
self.assertEqual(1, count_member_roles())
self.downgrade(16)
self.assertEqual(0, count_member_roles())
def check_uniqueness_constraints(self):
# Check that uniqueness constraints for the User & Project tables are
# correct following schema modification. The Group table's
# schema is never modified, so we don't bother to check that.
domain_table = sqlalchemy.Table('domain',
self.metadata,
autoload=True)
domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
domain2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
cmd = domain_table.insert().values(domain1)
self.engine.execute(cmd)
cmd = domain_table.insert().values(domain2)
self.engine.execute(cmd)
# First, the User table.
this_table = sqlalchemy.Table('user',
self.metadata,
autoload=True)
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain1['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
cmd = this_table.insert().values(user)
self.engine.execute(cmd)
# now insert a user with the same name into a different
# domain - which should work.
user['id'] = uuid.uuid4().hex
user['domain_id'] = domain2['id']
cmd = this_table.insert().values(user)
self.engine.execute(cmd)
# TODO(henry-nash): For now, as part of clean-up we delete one of these
# users. Although not part of this test, unless we do so the
# downgrade(16->15) that is part of teardown will fail due to having
# two users with clashing names as we try to revert to a single global
# name space. This limitation is raised as Bug #1125046 and the delete
# could be removed depending on how that bug is resolved.
cmd = this_table.delete(id=user['id'])
self.engine.execute(cmd)
# Now, the Project table.
this_table = sqlalchemy.Table('project',
self.metadata,
autoload=True)
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain1['id'],
'description': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
cmd = this_table.insert().values(project)
self.engine.execute(cmd)
# now insert a project with the same name into a different
# domain - which should work.
project['id'] = uuid.uuid4().hex
project['domain_id'] = domain2['id']
cmd = this_table.insert().values(project)
self.engine.execute(cmd)
# TODO(henry-nash): For now, we delete one of the projects for the same
# reason as we delete one of the users (Bug #1125046). This delete
# could be removed depending on that bug resolution.
cmd = this_table.delete(id=project['id'])
self.engine.execute(cmd)
def test_upgrade_trusts(self):
self.assertEqual(self.schema.version, 0, "DB is at version 0")
self.upgrade(20)
self.assertTableColumns("token",
["id", "expires", "extra", "valid"])
self.upgrade(21)
self.assertTableColumns("trust",
["id", "trustor_user_id",
"trustee_user_id",
"project_id", "impersonation",
"deleted_at",
"expires_at", "extra"])
self.assertTableColumns("trust_role",
["trust_id", "role_id"])
self.assertTableColumns("token",
["id", "expires", "extra", "valid",
"trust_id", "user_id"])
def test_fixup_role(self):
session = self.Session()
self.assertEqual(self.schema.version, 0, "DB is at version 0")
self.upgrade(1)
self.insert_dict(session, "role", {"id": "test", "name": "test"})
self.upgrade(18)
self.insert_dict(session, "role", {"id": "test2",
"name": "test2",
"extra": None})
r = session.execute('select count(*) as c from role '
'where extra is null')
self.assertEqual(r.fetchone()['c'], 2)
session.commit()
self.upgrade(19)
r = session.execute('select count(*) as c from role '
'where extra is null')
self.assertEqual(r.fetchone()['c'], 0)
def test_legacy_endpoint_id(self):
session = self.Session()
self.upgrade(21)
service = {
'id': uuid.uuid4().hex,
'name': 'keystone',
'type': 'identity'}
self.insert_dict(session, 'service', service)
legacy_endpoint_id = uuid.uuid4().hex
endpoint = {
'id': uuid.uuid4().hex,
'service_id': service['id'],
'interface': uuid.uuid4().hex[:8],
'url': uuid.uuid4().hex,
'extra': json.dumps({
'legacy_endpoint_id': legacy_endpoint_id})}
self.insert_dict(session, 'endpoint', endpoint)
session.commit()
self.upgrade(22)
endpoint_table = sqlalchemy.Table(
'endpoint', self.metadata, autoload=True)
self.assertEqual(session.query(endpoint_table).count(), 1)
ref = session.query(endpoint_table).one()
self.assertEqual(ref.id, endpoint['id'], ref)
self.assertEqual(ref.service_id, endpoint['service_id'])
self.assertEqual(ref.interface, endpoint['interface'])
self.assertEqual(ref.url, endpoint['url'])
self.assertEqual(ref.legacy_endpoint_id, legacy_endpoint_id)
self.assertEqual(ref.extra, '{}')
def test_group_project_FK_fixup(self):
# To create test data we must start before the FK in the
# group_project_metadata table was broken in 015.
self.upgrade(14)
session = self.Session()
domain_table = sqlalchemy.Table('domain', self.metadata, autoload=True)
group_table = sqlalchemy.Table('group', self.metadata, autoload=True)
tenant_table = sqlalchemy.Table('tenant', self.metadata, autoload=True)
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
group_project_metadata_table = sqlalchemy.Table(
'group_project_metadata', self.metadata, autoload=True)
# Create a Domain
domain = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(domain))
# Create two Tenants
tenant = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'extra': "{}"}
session.execute(tenant_table.insert().values(tenant))
tenant1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'extra': "{}"}
session.execute(tenant_table.insert().values(tenant1))
# Create a Group
group = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': json.dumps({})}
session.execute(group_table.insert().values(group))
# Create roles
role_list = []
for _ in range(2):
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role))
role_list.append(role)
# Grant Role to User on Project
role_grant = {'group_id': group['id'],
'project_id': tenant['id'],
'data': json.dumps({'roles': [role_list[0]['id']]})}
session.execute(
group_project_metadata_table.insert().values(role_grant))
role_grant = {'group_id': group['id'],
'project_id': tenant1['id'],
'data': json.dumps({'roles': [role_list[1]['id']]})}
session.execute(
group_project_metadata_table.insert().values(role_grant))
session.commit()
# Now upgrade and fix up the FKs
self.upgrade(28)
self.assertTableExists('group_project_metadata')
self.assertTableExists('project')
self.assertTableDoesNotExist('tenant')
s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
(group_project_metadata_table.c.group_id == group['id']) &
(group_project_metadata_table.c.project_id == tenant['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[0]['id'], data['roles'])
s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
(group_project_metadata_table.c.group_id == group['id']) &
(group_project_metadata_table.c.project_id == tenant1['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[1]['id'], data['roles'])
self.downgrade(27)
self.assertTableExists('group_project_metadata')
self.assertTableExists('project')
self.assertTableDoesNotExist('tenant')
def test_assignment_metadata_migration(self):
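# Migration 29 rewrites every stored role list from plain role ids to
# dicts of the form {'id': <role_id>} (the format that can also carry
# 'inherited_to'); downgrading to 28 flattens them back to bare ids and
# drops purely inherited entries.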
self.upgrade(28)
# Scaffolding
session = self.Session()
domain_table = sqlalchemy.Table('domain', self.metadata, autoload=True)
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
group_table = sqlalchemy.Table('group', self.metadata, autoload=True)
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
project_table = sqlalchemy.Table(
'project', self.metadata, autoload=True)
user_project_metadata_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
user_domain_metadata_table = sqlalchemy.Table(
'user_domain_metadata', self.metadata, autoload=True)
group_project_metadata_table = sqlalchemy.Table(
'group_project_metadata', self.metadata, autoload=True)
group_domain_metadata_table = sqlalchemy.Table(
'group_domain_metadata', self.metadata, autoload=True)
# Create a Domain
domain = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(domain))
# Create another Domain
domain2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(domain2))
# Create a Project
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': "{}"}
session.execute(project_table.insert().values(project))
# Create another Project
project2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': "{}"}
session.execute(project_table.insert().values(project2))
# Create a User
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
session.execute(user_table.insert().values(user))
# Create a Group
group = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': json.dumps({})}
session.execute(group_table.insert().values(group))
# Create roles
role_list = []
for _ in range(7):
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role))
role_list.append(role)
# Grant Role to User on Project
role_grant = {'user_id': user['id'],
'project_id': project['id'],
'data': json.dumps({'roles': [role_list[0]['id']]})}
session.execute(
user_project_metadata_table.insert().values(role_grant))
role_grant = {'user_id': user['id'],
'project_id': project2['id'],
'data': json.dumps({'roles': [role_list[1]['id']]})}
session.execute(
user_project_metadata_table.insert().values(role_grant))
# Grant Role to Group on different Project
role_grant = {'group_id': group['id'],
'project_id': project2['id'],
'data': json.dumps({'roles': [role_list[2]['id']]})}
session.execute(
group_project_metadata_table.insert().values(role_grant))
# Grant Role to User on Domain
role_grant = {'user_id': user['id'],
'domain_id': domain['id'],
'data': json.dumps({'roles': [role_list[3]['id']]})}
session.execute(user_domain_metadata_table.insert().values(role_grant))
# Grant Role to Group on Domain
role_grant = {'group_id': group['id'],
'domain_id': domain['id'],
'data': json.dumps(
{'roles': [role_list[4]['id']],
'other': 'somedata'})}
session.execute(
group_domain_metadata_table.insert().values(role_grant))
session.commit()
self.upgrade(29)
s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
(user_project_metadata_table.c.user_id == user['id']) &
(user_project_metadata_table.c.project_id == project['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': role_list[0]['id']}, data['roles'])
s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
(user_project_metadata_table.c.user_id == user['id']) &
(user_project_metadata_table.c.project_id == project2['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': role_list[1]['id']}, data['roles'])
s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
(group_project_metadata_table.c.group_id == group['id']) &
(group_project_metadata_table.c.project_id == project2['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': role_list[2]['id']}, data['roles'])
s = sqlalchemy.select([user_domain_metadata_table.c.data]).where(
(user_domain_metadata_table.c.user_id == user['id']) &
(user_domain_metadata_table.c.domain_id == domain['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': role_list[3]['id']}, data['roles'])
s = sqlalchemy.select([group_domain_metadata_table.c.data]).where(
(group_domain_metadata_table.c.group_id == group['id']) &
(group_domain_metadata_table.c.domain_id == domain['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn({'id': role_list[4]['id']}, data['roles'])
self.assertIn('other', data)
# Now add an entry that has one regular and one inherited role
role_grant = {'user_id': user['id'],
'domain_id': domain2['id'],
'data': json.dumps(
{'roles': [{'id': role_list[5]['id']},
{'id': role_list[6]['id'],
'inherited_to': 'projects'}]})}
session.execute(user_domain_metadata_table.insert().values(role_grant))
session.commit()
self.downgrade(28)
s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
(user_project_metadata_table.c.user_id == user['id']) &
(user_project_metadata_table.c.project_id == project['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[0]['id'], data['roles'])
s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
(user_project_metadata_table.c.user_id == user['id']) &
(user_project_metadata_table.c.project_id == project2['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[1]['id'], data['roles'])
s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
(group_project_metadata_table.c.group_id == group['id']) &
(group_project_metadata_table.c.project_id == project2['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[2]['id'], data['roles'])
s = sqlalchemy.select([user_domain_metadata_table.c.data]).where(
(user_domain_metadata_table.c.user_id == user['id']) &
(user_domain_metadata_table.c.domain_id == domain['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[3]['id'], data['roles'])
s = sqlalchemy.select([group_domain_metadata_table.c.data]).where(
(group_domain_metadata_table.c.group_id == group['id']) &
(group_domain_metadata_table.c.domain_id == domain['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[4]['id'], data['roles'])
self.assertIn('other', data)
# For user-domain2, where we had one regular and one inherited role,
# only the direct role should remain; the inherited role should
# have been deleted during the downgrade.
s = sqlalchemy.select([user_domain_metadata_table.c.data]).where(
(user_domain_metadata_table.c.user_id == user['id']) &
(user_domain_metadata_table.c.domain_id == domain2['id']))
r = session.execute(s)
data = json.loads(r.fetchone()['data'])
self.assertEqual(len(data['roles']), 1)
self.assertIn(role_list[5]['id'], data['roles'])
def test_drop_credential_constraint(self):
ec2_credential = {
'id': '100',
'user_id': 'foo',
'project_id': 'bar',
'type': 'ec2',
'blob': json.dumps({
"access": "12345",
"secret": "12345"
})
}
user = {
'id': 'foo',
'name': 'FOO',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'enabled': True})
}
tenant = {
'id': 'bar',
'name': 'BAR',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
session = self.Session()
self.upgrade(7)
self.insert_dict(session, 'user', user)
self.insert_dict(session, 'tenant', tenant)
self.insert_dict(session, 'credential', ec2_credential)
session.commit()
self.upgrade(30)
cred_table = sqlalchemy.Table('credential',
self.metadata,
autoload=True)
cred = session.query(cred_table).filter("id='100'").one()
self.assertEqual(cred.user_id,
ec2_credential['user_id'])
def test_drop_credential_indexes(self):
self.upgrade(31)
table = sqlalchemy.Table('credential', self.metadata, autoload=True)
self.assertEqual(len(table.indexes), 0)
def test_downgrade_30(self):
self.upgrade(31)
self.downgrade(30)
table = sqlalchemy.Table('credential', self.metadata, autoload=True)
index_data = [(idx.name, idx.columns.keys())
for idx in table.indexes]
if self.engine.name == 'mysql':
self.assertIn(('user_id', ['user_id']), index_data)
self.assertIn(('credential_project_id_fkey', ['project_id']),
index_data)
else:
self.assertEqual(len(index_data), 0)
def test_migrate_ec2_credential(self):
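# Migration 33 folds each ec2_credential row into the generic credential
# table, using a hash of the access key as the new credential id (and,
# judging by the conflict tests below, packing access/secret into the
# JSON blob); the downgrade restores the ec2_credential table.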
user = {
'id': 'foo',
'name': 'FOO',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'enabled': True})
}
project = {
'id': 'bar',
'name': 'BAR',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
ec2_credential = {
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex,
'user_id': user['id'],
'tenant_id': project['id'],
}
session = self.Session()
self.upgrade(7)
self.insert_dict(session, 'ec2_credential', ec2_credential)
self.insert_dict(session, 'user', user)
self.insert_dict(session, 'tenant', project)
self.upgrade(33)
self.assertTableDoesNotExist('ec2_credential')
cred_table = sqlalchemy.Table('credential',
self.metadata,
autoload=True)
expected_credential_id = utils.hash_access_key(
ec2_credential['access'])
cred = session.query(cred_table).filter_by(
id=expected_credential_id).one()
self.assertEqual(cred.user_id, ec2_credential['user_id'])
self.assertEqual(cred.project_id, ec2_credential['tenant_id'])
# test list credential using credential manager.
credential_api = credential.Manager()
self.assertNotEmpty(credential_api.
list_credentials(
user_id=ec2_credential['user_id']))
self.downgrade(32)
session.commit()
self.assertTableExists('ec2_credential')
ec2_cred_table = sqlalchemy.Table('ec2_credential',
self.metadata,
autoload=True)
ec2_cred = session.query(ec2_cred_table).filter_by(
access=ec2_credential['access']).one()
self.assertEqual(ec2_cred.user_id, ec2_credential['user_id'])
def test_migrate_ec2_credential_with_conflict_project(self):
user = {
'id': 'foo',
'name': 'FOO',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'enabled': True})
}
project_1 = {
'id': 'bar',
'name': 'BAR',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
project_2 = {
'id': 'baz',
'name': 'BAZ',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
ec2_credential = {
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex,
'user_id': user['id'],
'tenant_id': project_1['id'],
}
blob = {'access': ec2_credential['access'],
'secret': ec2_credential['secret']}
v3_credential = {
'id': utils.hash_access_key(ec2_credential['access']),
'user_id': user['id'],
# set the project id to simulate a conflict
'project_id': project_2['id'],
'blob': json.dumps(blob),
'type': 'ec2',
'extra': json.dumps({})
}
session = self.Session()
self.upgrade(7)
self.insert_dict(session, 'ec2_credential', ec2_credential)
self.insert_dict(session, 'user', user)
self.insert_dict(session, 'tenant', project_1)
self.insert_dict(session, 'tenant', project_2)
self.upgrade(32)
self.insert_dict(session, 'credential', v3_credential)
self.assertRaises(exception.Conflict, self.upgrade, 33)
def test_migrate_ec2_credential_with_conflict_secret(self):
user = {
'id': 'foo',
'name': 'FOO',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'enabled': True})
}
project_1 = {
'id': 'bar',
'name': 'BAR',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
project_2 = {
'id': 'baz',
'name': 'BAZ',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
ec2_credential = {
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex,
'user_id': user['id'],
'tenant_id': project_1['id'],
}
blob = {'access': ec2_credential['access'],
'secret': 'different secret'}
v3_cred_different_secret = {
'id': utils.hash_access_key(ec2_credential['access']),
'user_id': user['id'],
'project_id': project_1['id'],
'blob': json.dumps(blob),
'type': 'ec2',
'extra': json.dumps({})
}
session = self.Session()
self.upgrade(7)
self.insert_dict(session, 'ec2_credential', ec2_credential)
self.insert_dict(session, 'user', user)
self.insert_dict(session, 'tenant', project_1)
self.insert_dict(session, 'tenant', project_2)
self.upgrade(32)
self.insert_dict(session, 'credential', v3_cred_different_secret)
self.assertRaises(exception.Conflict, self.upgrade, 33)
def test_migrate_ec2_credential_with_invalid_blob(self):
user = {
'id': 'foo',
'name': 'FOO',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'enabled': True})
}
project_1 = {
'id': 'bar',
'name': 'BAR',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
project_2 = {
'id': 'baz',
'name': 'BAZ',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
ec2_credential = {
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex,
'user_id': user['id'],
'tenant_id': project_1['id'],
}
blob = '{"abc":"def"d}'
v3_cred_invalid_blob = {
'id': utils.hash_access_key(ec2_credential['access']),
'user_id': user['id'],
'project_id': project_1['id'],
'blob': json.dumps(blob),
'type': 'ec2',
'extra': json.dumps({})
}
session = self.Session()
self.upgrade(7)
self.insert_dict(session, 'ec2_credential', ec2_credential)
self.insert_dict(session, 'user', user)
self.insert_dict(session, 'tenant', project_1)
self.insert_dict(session, 'tenant', project_2)
self.upgrade(32)
self.insert_dict(session, 'credential', v3_cred_invalid_blob)
self.assertRaises(exception.ValidationError, self.upgrade, 33)
def test_migrate_add_default_project_id_column_upgrade(self):
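# Migration 34 adds a default_project_id column and populates it from
# whichever of 'tenantId', 'tenant_id' or 'default_project_id' is found
# in the user's 'extra' blob ('default_project_id' winning when several
# are present), removing the migrated key(s) from 'extra'.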
user1 = {
'id': 'foo1',
'name': 'FOO1',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'tenantId': 'bar'}),
'domain_id': DEFAULT_DOMAIN_ID
}
user2 = {
'id': 'foo2',
'name': 'FOO2',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'tenant_id': 'bar'}),
'domain_id': DEFAULT_DOMAIN_ID
}
user3 = {
'id': 'foo3',
'name': 'FOO3',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'default_project_id': 'bar'}),
'domain_id': DEFAULT_DOMAIN_ID
}
user4 = {
'id': 'foo4',
'name': 'FOO4',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({'tenantId': 'baz',
'default_project_id': 'bar'}),
'domain_id': DEFAULT_DOMAIN_ID
}
session = self.Session()
self.upgrade(33)
self.insert_dict(session, 'user', user1)
self.insert_dict(session, 'user', user2)
self.insert_dict(session, 'user', user3)
self.insert_dict(session, 'user', user4)
self.assertTableColumns('user',
['id', 'name', 'extra', 'password',
'enabled', 'domain_id'])
session.commit()
session.close()
self.upgrade(34)
session = self.Session()
self.assertTableColumns('user',
['id', 'name', 'extra', 'password',
'enabled', 'domain_id', 'default_project_id'])
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
updated_user1 = session.query(user_table).filter_by(id='foo1').one()
old_json_data = json.loads(user1['extra'])
new_json_data = json.loads(updated_user1.extra)
self.assertNotIn('tenantId', new_json_data)
self.assertEqual(old_json_data['tenantId'],
updated_user1.default_project_id)
updated_user2 = session.query(user_table).filter_by(id='foo2').one()
old_json_data = json.loads(user2['extra'])
new_json_data = json.loads(updated_user2.extra)
self.assertNotIn('tenant_id', new_json_data)
self.assertEqual(old_json_data['tenant_id'],
updated_user2.default_project_id)
updated_user3 = session.query(user_table).filter_by(id='foo3').one()
old_json_data = json.loads(user3['extra'])
new_json_data = json.loads(updated_user3.extra)
self.assertNotIn('default_project_id', new_json_data)
self.assertEqual(old_json_data['default_project_id'],
updated_user3.default_project_id)
updated_user4 = session.query(user_table).filter_by(id='foo4').one()
old_json_data = json.loads(user4['extra'])
new_json_data = json.loads(updated_user4.extra)
self.assertNotIn('default_project_id', new_json_data)
self.assertNotIn('tenantId', new_json_data)
self.assertEqual(old_json_data['default_project_id'],
updated_user4.default_project_id)
def test_migrate_add_default_project_id_column_downgrade(self):
user1 = {
'id': 'foo1',
'name': 'FOO1',
'password': 'foo2',
'enabled': True,
'email': '[email protected]',
'extra': json.dumps({}),
'default_project_id': 'bar',
'domain_id': DEFAULT_DOMAIN_ID
}
self.upgrade(34)
session = self.Session()
self.insert_dict(session, 'user', user1)
self.assertTableColumns('user',
['id', 'name', 'extra', 'password',
'enabled', 'domain_id', 'default_project_id'])
session.commit()
session.close()
self.downgrade(33)
session = self.Session()
self.assertTableColumns('user',
['id', 'name', 'extra', 'password',
'enabled', 'domain_id'])
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
updated_user1 = session.query(user_table).filter_by(id='foo1').one()
new_json_data = json.loads(updated_user1.extra)
self.assertIn('tenantId', new_json_data)
self.assertIn('default_project_id', new_json_data)
self.assertEqual(user1['default_project_id'],
new_json_data['tenantId'])
self.assertEqual(user1['default_project_id'],
new_json_data['default_project_id'])
self.assertEqual(user1['default_project_id'],
new_json_data['tenant_id'])
def populate_user_table(self, with_pass_enab=False,
with_pass_enab_domain=False):
# Populate the appropriate fields in the user
# table, depending on the parameters:
#
# Default: id, name, extra
# pass_enab: Add password, enabled as well
# pass_enab_domain: Add password, enabled and domain as well
#
this_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
for user in default_fixtures.USERS:
extra = copy.deepcopy(user)
extra.pop('id')
extra.pop('name')
if with_pass_enab:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_pass_enab_domain:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'domain_id': user['domain_id'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def populate_tenant_table(self, with_desc_enab=False,
with_desc_enab_domain=False):
# Populate the appropriate fields in the tenant or
# project table, depending on the parameters
#
# Default: id, name, extra
# desc_enab: Add description, enabled as well
# desc_enab_domain: Add description, enabled and domain as well,
# plus use project instead of tenant
#
if with_desc_enab_domain:
# By this time tenants are now projects
this_table = sqlalchemy.Table("project",
self.metadata,
autoload=True)
else:
this_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
for tenant in default_fixtures.TENANTS:
extra = copy.deepcopy(tenant)
extra.pop('id')
extra.pop('name')
if with_desc_enab:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_desc_enab_domain:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'domain_id': tenant['domain_id'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def _mysql_check_all_tables_innodb(self):
database = self.engine.url.database
connection = self.engine.connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s'" %
dict(database=database))
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = connection.execute("SELECT table_name "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s' "
"and ENGINE!='InnoDB' "
"and TABLE_NAME!='migrate_version'" %
dict(database=database))
names = [x[0] for x in noninnodb]
self.assertEqual(names, [],
"Non-InnoDB tables exist")
connection.close()
import functools
import operator
import random
from ..config import using
from ..core import Dummy
from ..domains.algebraicfield import AlgebraicElement
from ..integrals.heurisch import _symbols
from ..ntheory import nextprime
from .modulargcd import (_euclidean_algorithm, _gf_gcdex, _minpoly_from_dense,
_trunc)
from .polyerrors import NotInvertible, UnluckyLeadingCoefficient
from .polyutils import _sort_factors
from .rings import PolynomialRing
from .solvers import solve_lin_sys
# TODO
# ====
# -) efficiency of _factor can be improved for irreducible polynomials if the
# univariate factorization is done before the LC is factored
def _alpha_to_z(f, ring):
r"""
Change the representation of a polynomial over
`\mathbb Q(\alpha)` by replacing the algebraic element `\alpha` by a new
variable `z`.
Parameters
==========
f : PolyElement
polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_n]`
ring : PolynomialRing
the polynomial ring `\mathbb Q[x_0, \ldots, x_n, z]`
Returns
=======
f_ : PolyElement
polynomial in `\mathbb Q[x_0, \ldots, x_n, z]`
"""
if isinstance(f, AlgebraicElement):
ring = ring.drop(*ring.gens[:-1])
f_ = ring(dict(f.rep))
else:
f_ = ring.zero
for monom, coeff in f.items():
coeff = coeff.rep.all_coeffs()
n = len(coeff)
for i in range(n):
m = monom + (n-i-1,)
if coeff[n - i - 1]:
f_[m] = coeff[n - i - 1]
return f_
def _z_to_alpha(f, ring):
r"""
Change the representation of a polynomial in
`\mathbb Q[x_0, \ldots, x_n, z]` by replacing the variable `z` by the
algebraic element `\alpha` of the given ring
`\mathbb Q(\alpha)[x_0, \ldots, x_n]`.
Parameters
==========
f : PolyElement
polynomial in `\mathbb Q[x_0, \ldots, x_n, z]`
ring : PolynomialRing
the polynomial ring `\mathbb Q(\alpha)[x_0, \ldots, x_n]`
Returns
=======
f_ : PolyElement
polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_n]`
"""
domain = ring.domain
f_ = ring.zero
for monom, coeff in f.items():
m = monom[:-1]
c = domain([0]*monom[-1] + [domain.domain(coeff)])
if m not in f_:
f_[m] = c
else:
f_[m] += c
return f_
def _distinct_prime_divisors(S, domain):
r"""
Try to find pairwise coprime divisors of all elements of a given list
`S` of integers.
If this fails, ``None`` is returned.
References
==========
* :cite:`Javadi2009factor`
"""
gcd = domain.gcd
divisors = []
for i, s in enumerate(S):
divisors.append(s)
for j in range(i):
g = gcd(divisors[i], divisors[j])
divisors[i] = divisors[i] // g
divisors[j] = divisors[j] // g
g1 = gcd(divisors[i], g)
g2 = gcd(divisors[j], g)
while g1 != 1:
g1 = gcd(divisors[i], g1)
divisors[i] = divisors[i] // g1
while g2 != 1:
g2 = gcd(divisors[j], g2)
divisors[j] = divisors[j] // g2
if divisors[i] == 1 or divisors[j] == 1:
return
return divisors
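# For example (plain integers): S = [6, 15] yields [2, 5], since 2 | 6,
# 5 | 15 and gcd(2, 5) == 1, whereas S = [12, 18] yields None because
# once the shared factors are stripped out one of the entries collapses
# to 1.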
def _denominator(f):
r"""
Compute the denominator `\mathrm{den}(f)` of a polynomial `f` over
`\mathbb Q`, i.e. the smallest integer such that `\mathrm{den}(f) f` is
a polynomial over `\mathbb Z`.
"""
ring = f.ring.domain.ring
lcm = ring.lcm
den = ring.one
for coeff in f.values():
den = lcm(den, coeff.denominator)
return den
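# E.g. for f = x/2 + y/3 over QQ the coefficient denominators are 2 and
# 3, so _denominator(f) == lcm(2, 3) == 6.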
def _monic_associate(f, ring):
r"""
Compute the monic associate of a polynomial `f` over
`\mathbb Q(\alpha)`, which is defined as
.. math ::
\mathrm{den}\left( \frac 1 {\mathrm{lc}(f)} f \right) \cdot \frac 1 {\mathrm{lc}(f)} f.
The result is a polynomial in `\mathbb Z[x_0, \ldots, x_n, z]`.
See also
========
_denominator
_alpha_to_z
"""
qring = ring.clone(domain=ring.domain.field)
f = _alpha_to_z(f.monic(), qring)
f_ = f.clear_denoms()[1].set_ring(ring)
return f_.primitive()[1]
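# Rough numeric sketch (ignoring the algebraic part): for f = 4*x + 6,
# f.monic() is x + 3/2, its denominator is 2, and the primitive part of
# 2*(x + 3/2) == 2*x + 3 is 2*x + 3 itself, which is the monic associate.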
def _leading_coeffs(f, U, gamma, lcfactors, A, D, denoms, divisors):
r"""
Compute the true leading coefficients in `x_0` of the irreducible
factors of a polynomial `f`.
If this fails, ``None`` is returned.
Parameters
==========
f : PolyElement
squarefree polynomial in `\mathbb Z[x_0, \ldots, x_n, z]`
U : list of PolyElement objects
monic univariate factors of `f(x_0, A)` in `\mathbb Q(\alpha)[x_0]`
gamma : Integer
integer content of `\mathrm{lc}_{x_0}(f)`
lcfactors : list of (PolyElement, Integer) objects
factorization of `\mathrm{lc}_{x_0}(f)` in
`\mathbb Z[x_1, \ldots, x_n, z]`
A : list of Integer objects
the evaluation point `[a_1, \ldots, a_n]`
D : Integer
integral multiple of the defect of `\mathbb Q(\alpha)`
denoms : list of Integer objects
denominators of `\frac 1 {l(A)}` for `l` in ``lcfactors``
divisors : list of Integer objects
pairwise coprime divisors of all elements of ``denoms``
Returns
=======
f : PolyElement
possibly updated polynomial `f`
lcs : list of PolyElement objects
true leading coefficients of the irreducible factors of `f`
U_ : list of PolyElement objects
list of possibly updated monic associates of the univariate factors
`U`
References
==========
* :cite:`Javadi2009factor`
"""
ring = f.ring
domain = ring.domain
symbols = f.ring.symbols
qring = ring.clone(symbols=(symbols[0], symbols[-1]), domain=domain.field)
gcd = domain.gcd
U = [_alpha_to_z(u, qring) for u, _ in U]
denominators = [_denominator(u) for u in U]
omega = D * gamma
m = len(denoms)
for i in range(m):
pi = gcd(omega, divisors[i])
divisors[i] //= pi
if divisors[i] == 1:
raise NotImplementedError
e = []
for dj in denominators:
ej = []
for i in range(m):
eji = 0
g1 = gcd(dj, divisors[i])
while g1 != 1:
eji += 1
dj = dj // g1
g1 = gcd(dj, g1)
ej.append(eji)
e.append(ej)
n = len(denominators)
if any(sum(e[j][i] for j in range(n)) != lcfactors[i][1] for i in range(m)):
return
lcring = ring.drop(0)
lcs = []
for j in range(n):
lj = functools.reduce(operator.mul, [lcfactors[i][0]**e[j][i] for i in range(m)], lcring.one)
lcs.append(lj)
zring = qring.clone(domain=domain)
for j in range(n):
lj = lcs[j]
dj = denominators[j]
ljA = lj.eval(list(zip(lcring.gens, A)))
lcs[j] = lj*dj
U[j] = (U[j]*dj).set_ring(zring) * ljA.set_ring(zring)
if omega == 1:
f *= dj
else:
d = gcd(omega, dj)
f *= (dj // d)
if omega != 1:
lcs[0] *= omega
U[0] *= omega
return f, lcs, U
def _test_evaluation_points(f, gamma, lcfactors, A, D):
r"""
Test if an evaluation point is suitable for _factor.
If it is not, ``None`` is returned.
Parameters
==========
f : PolyElement
squarefree polynomial in `\mathbb Z[x_0, \ldots, x_n, z]`
gamma : Integer
leading coefficient of `f` in `\mathbb Z`
lcfactors : list of (PolyElement, Integer) objects
factorization of `\mathrm{lc}_{x_0}(f)` in
`\mathbb Z[x_1, \ldots, x_n, z]`
A : list of Integer objects
the evaluation point `[a_1, \ldots, a_n]`
D : Integer
integral multiple of the defect of `\mathbb Q(\alpha)`
Returns
=======
fA : PolyElement
`f` evaluated at `A`, i.e. `f(x_0, A)`
denoms : list of Integer objects
the denominators of `\frac 1 {l(A)}` for `l` in ``lcfactors``
divisors : list of Integer objects
pairwise coprime divisors of all elements of ``denoms``
References
==========
* :cite:`Javadi2009factor`
See also
========
_factor
"""
ring = f.ring
qring = ring.clone(domain=ring.domain.field)
fA = f.eval(list(zip(ring.gens[1:-1], A)))
if fA.degree() < f.degree():
return
if not fA.is_squarefree:
return
omega = gamma * D
denoms = []
for l, _ in lcfactors:
lA = l.eval(list(zip(l.ring.gens, A))) # in Q(alpha)
denoms.append(_denominator(_alpha_to_z(lA**(-1), qring)))
if any(denoms.count(denom) > 1 for denom in denoms):
raise UnluckyLeadingCoefficient
divisors = _distinct_prime_divisors(denoms, ring.domain)
if divisors is None:
return
elif any(omega % d == 0 for d in divisors):
return
return fA, denoms, divisors
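# In short, an evaluation point A is rejected unless f(x_0, A) keeps the
# full degree in x_0 and stays squarefree, pairwise coprime divisors of
# the leading-coefficient denominators can be found, and none of those
# divisors divides omega = gamma*D.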
def _subs_ground(f, A):
r"""
Substitute variables in the coefficients of a polynomial `f` over a
``PolynomialRing``.
"""
f_ = f.ring.zero
for monom, coeff in f.items():
if coeff.compose(A):
f_[monom] = coeff.compose(A)
return f_
def _padic_lift(f, pfactors, lcs, B, minpoly, p):
r"""
Lift the factorization of a polynomial over `\mathbb Z_p[z]/(\mu(z))` to
a factorization over `\mathbb Z_{p^m}[z]/(\mu(z))`, where `p^m \geq B`.
If this fails, ``None`` is returned.
Parameters
==========
f : PolyElement
squarefree polynomial in `\mathbb Z[x_0, \ldots, x_n, z]`
pfactors : list of PolyElement objects
irreducible factors of `f` modulo `p`
lcs : list of PolyElement objects
true leading coefficients in `x_0` of the irreducible factors of `f`
B : Integer
heuristic numerical bound on the size of the largest integer
coefficient in the irreducible factors of `f`
minpoly : PolyElement
minimal polynomial `\mu` of `\alpha` over `\mathbb Q`
p : Integer
prime number
Returns
=======
H : list of PolyElement objects
factorization of `f` modulo `p^m`, where `p^m \geq B`
References
==========
* :cite:`Javadi2009factor`
"""
ring = f.ring
domain = ring.domain
x = ring.gens[0]
tails = [g - g.eject(*ring.gens[1:]).LC.set_ring(ring)*x**g.degree() for g in pfactors]
coeffs = []
for i, g in enumerate(tails):
coeffs += _symbols(f'c{i}', len(g))
coeffring = PolynomialRing(domain, coeffs)
ring_ = ring.clone(domain=coeffring)
S = []
k = 0
for t in tails:
s = ring_.zero
r = len(t)
for i, monom in zip(range(k, k + r), t):
s[monom] = coeffring.gens[i]
S.append(s)
k += r
m = minpoly.set_ring(ring_)
f = f.set_ring(ring_)
x = ring_.gens[0]
H = [t.set_ring(ring_) + li.set_ring(ring_)*x**g.degree() for t, g, li in
zip(tails, pfactors, lcs)]
prod = functools.reduce(operator.mul, H)
e = (f - prod) % m
P = domain(p)
while e and P < 2*B:
poly = e // P
for s, h in zip(S, H):
poly -= (prod//h)*s
poly = _trunc(poly, m, P)
P_domain = domain.finite_ring(P)
try:
solution = solve_lin_sys([_.set_domain(P_domain)
for _ in poly.values()],
coeffring.clone(domain=P_domain))
except NotInvertible:
return
if solution is None:
return
else:
solution = {k.set_domain(domain): v.set_domain(domain).trunc_ground(P)
for k, v in solution.items()}
assert len(solution) == coeffring.ngens
subs = list(solution.items())
H = [h + _subs_ground(s, subs)*P for h, s in zip(H, S)]
P = P**2
prod = functools.reduce(operator.mul, H)
e = (f - prod) % m
if e == 0:
return [h.set_ring(ring) for h in H]
else:
return
def _div(f, g, minpoly, p):
r"""
Division with remainder for univariate polynomials over
`\mathbb Z_p[z]/(\mu(z))`.
"""
ring = f.ring
domain = ring.domain
rem = f
deg = g.degree(0)
lcinv, _, gcd = _gf_gcdex(g.eject(*ring.gens[1:]).LC, minpoly, p)
if not gcd == 1:
raise NotImplementedError
quotient = ring.zero
while True:
degrem = rem.degree(0)
if degrem < deg:
break
m = ring.from_terms([((degrem - deg, 0), domain.one)])
quo = (lcinv * rem.eject(*ring.gens[1:]).LC).set_ring(ring)*m
rem = _trunc(rem - g*quo, minpoly, p)
quotient += quo
return _trunc(quotient, minpoly, p), rem
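# Worked example assuming a trivial extension (so Z_p[z]/(mu) is just
# Z_p) and p = 5: dividing x**2 + 1 by 2*x + 1 inverts the leading
# coefficient (2**-1 == 3 mod 5) and yields quotient 3*x + 1 with
# remainder 0, since (2*x + 1)*(3*x + 1) == 6*x**2 + 5*x + 1 == x**2 + 1
# (mod 5).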
def _extended_euclidean_algorithm(f, g, minpoly, p):
r"""
Extended Euclidean Algorithm for univariate polynomials over
`\mathbb Z_p[z]/(\mu(z))`.
Returns `s, t, h`, where `h` is the GCD of `f` and `g` and
`sf + tg = h`.
"""
ring = f.ring
zero = ring.zero
one = ring.one
f = _trunc(f, minpoly, p)
g = _trunc(g, minpoly, p)
s0, s1 = zero, one
t0, t1 = one, zero
while g:
result = _div(f, g, minpoly, p)
if result is None:
raise NotImplementedError
quo, rem = result
f, g = g, rem
s0, s1 = s1 - quo*s0, s0
t0, t1 = t1 - quo*t0, t0
lcfinv = _gf_gcdex(f.eject(*ring.gens[1:]).LC, minpoly, p)[0].set_ring(ring)
return (_trunc(s1 * lcfinv, minpoly, p), _trunc(t1 * lcfinv, minpoly, p),
_trunc(f * lcfinv, minpoly, p))
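# The final multiplication by lcfinv normalizes the result so that the
# returned gcd h is monic; continuing the _div example over Z_5,
# gcd(x**2 + 1, 2*x + 1) comes back as the monic x + 3 (== 3*(2*x + 1)).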
def _diophantine_univariate(F, m, minpoly, p):
r"""
Solve univariate Diophantine equations of the form
.. math ::
\sum_{f \in F} \left( h_f(x) \cdot \prod_{g \in F \setminus \lbrace f \rbrace } g(x) \right) = x^m
over `\mathbb Z_p[z]/(\mu(z))`.
"""
ring = F[0].ring
domain = ring.domain
m = ring.from_terms([((m, 0), domain.one)])
if len(F) == 2:
f, g = F
result = _extended_euclidean_algorithm(g, f, minpoly, p)
if result is None:
raise NotImplementedError
s, t, _ = result
s *= m
t *= m
q, s = _div(s, f, minpoly, p)
t += q*g
s = _trunc(s, minpoly, p)
t = _trunc(t, minpoly, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, f * G[0])
S, T = [], [ring.one]
for f, g in zip(F, G):
result = _diophantine([g, f], T[-1], [], 0, minpoly, p)
if result is None:
raise NotImplementedError
t, s = result
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
r = _div(s*m, f, minpoly, p)[1]
s = _trunc(r, minpoly, p)
result.append(s)
return result
def _diophantine(F, c, A, d, minpoly, p):
r"""
Solve multivariate Diophantine equations over `\mathbb Z_p[z]/(\mu(z))`.
"""
ring = c.ring
if not A:
S = [ring.zero for _ in F]
c = _trunc(c, minpoly, p)
for (exp,), coeff in c.eject(1).items():
T = _diophantine_univariate(F, exp, minpoly, p)
if T is None:
raise NotImplementedError
for j, (s, t) in enumerate(zip(S, T)):
S[j] = _trunc(s + t*coeff.set_ring(ring), minpoly, p)
else:
n = len(A)
e = functools.reduce(operator.mul, F)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(e//f)
G.append(f.eval(n, a))
C = c.eval(n, a)
S = _diophantine(G, C, A, d, minpoly, p)
if S is None:
raise NotImplementedError
S = [s.set_ring(ring) for s in S]
for s, b in zip(S, B):
c = c - s*b
c = _trunc(c, minpoly, p)
m = ring.gens[n] - a
M = ring.one
for k in range(d):
if not c:
break
M = M * m
C = c.diff(x=n, m=k + 1).eval(x=n, a=a)
if C:
C = C.quo_ground(ring.domain.factorial(k + 1))
T = _diophantine(G, C, A, d, minpoly, p)
if T is None:
raise NotImplementedError
for i, t in enumerate(T):
T[i] = t.set_ring(ring) * M
for i, (s, t) in enumerate(zip(S, T)):
S[i] = s + t
for t, b in zip(T, B):
c = c - t * b
c = _trunc(c, minpoly, p)
else:
raise NotImplementedError
S = [_trunc(s, minpoly, p) for s in S]
return S
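# The multivariate branch above works by Taylor-expanding the residual c
# about x_n = a: each coefficient of (x_n - a)**(k + 1), obtained via
# diff/eval and division by (k + 1)!, is fed to a smaller Diophantine
# problem and the resulting corrections are accumulated into S.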
def _hensel_lift(f, H, LC, A, minpoly, p):
r"""
Parallel Hensel lifting algorithm over `\mathbb Z_p[z]/(\mu(z))`.
Parameters
==========
f : PolyElement
squarefree polynomial in `\mathbb Z[x_0, \ldots, x_n, z]`
H : list of PolyElement objects
monic univariate factors of `f(x_0, A)` in
`\mathbb Z[x_0, z]`
LC : list of PolyElement objects
true leading coefficients of the irreducible factors of `f`
A : list of Integer objects
the evaluation point `[a_1, \ldots, a_n]`
minpoly : PolyElement
minimal polynomial `\mu` of `\alpha` over `\mathbb Q`
p : Integer
prime number
Returns
=======
pfactors : list of PolyElement objects
irreducible factors of `f` modulo `p`
"""
ring = f.ring
n = len(A)
S = [f]
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = S[0].eval(n - i, a)
S.insert(0, _trunc(s, minpoly, p))
d = max(f.degree(_) for _ in ring.gens[1:-1])
for j, s, a in zip(range(1, n + 1), S, A):
G = list(H)
I, J = A[:j - 1], A[j:]
Hring = f.ring
for _ in range(j, n):
Hring = Hring.drop(j + 1)
x = Hring.gens[0]
evalpoints = list(zip(LC[0].ring.gens[j:-1], J))
for i, (h, lc) in enumerate(zip(H, LC)):
if evalpoints:
lc = lc.eval(evalpoints)
lc = _trunc(lc, minpoly, p).set_ring(Hring)
H[i] = h.set_ring(Hring) + (lc - h.eject(*h.ring.gens[1:]).LC.set_ring(Hring))*x**h.degree()
m = Hring.gens[j] - a
M = Hring.one
c = _trunc(s - functools.reduce(operator.mul, H), minpoly, p)
dj = s.degree(j)
for k in range(dj):
if not c:
break
M = M * m
C = c.diff(x=j, m=k + 1).eval(x=j, a=a)
if C:
C = C.quo_ground(ring.domain.factorial(k + 1)) # coeff of (x_{j-1} - a_{j-1})^(k + 1) in c
T = _diophantine(G, C, I, d, minpoly, p)
if T is None:
raise NotImplementedError
for i, (h, t) in enumerate(zip(H, T)):
H[i] = _trunc(h + t.set_ring(Hring)*M, minpoly, p)
c = _trunc(s - functools.reduce(operator.mul, H), minpoly, p)
prod = functools.reduce(operator.mul, H)
if _trunc(prod, minpoly, p) == f.trunc_ground(p):
return H
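# Hedged sketch (not part of the original module): the post-condition a
# successful Hensel lift satisfies -- the product of the lifted factors
# reproduces f modulo (minpoly, p); a failed lift returns None.  functools and
# operator are assumed to be imported earlier in this file, as they are
# already used above.  Illustrative only.
def _check_hensel_lift(f, H, LC, A, minpoly, p):
    pfactors = _hensel_lift(f, H, LC, A, minpoly, p)
    if pfactors is None:
        return False
    prod = functools.reduce(operator.mul, pfactors)
    return _trunc(prod, minpoly, p) == f.trunc_ground(p)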
def _sqf_p(f, minpoly, p):
r"""
Return ``True`` if `f` is square-free in `\mathbb Z_p[z]/(\mu(z))[x]`.
"""
ring = f.ring
lcinv, *_ = _gf_gcdex(f.eject(*ring.gens[1:]).LC, minpoly, p)
f = _trunc(f * lcinv.set_ring(ring), minpoly, p)
if not f:
return True
else:
return _euclidean_algorithm(f, _trunc(f.diff(0), minpoly, p), minpoly, p) == 1
def _test_prime(fA, D, minpoly, p, domain):
r"""
Test if a prime number is suitable for _factor.
See also
========
_factor
"""
if fA.LC % p == 0 or minpoly.LC % p == 0:
return False
if not _sqf_p(fA, minpoly, p):
return False
if D % p == 0:
return False
return True
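# Hedged sketch (not part of the original module): how _test_prime is used by
# _factor below -- advance to the next prime until all the modular
# suitability checks pass.  nextprime is assumed to be imported earlier in
# this file (it is already used inside _factor).  Illustrative only.
def _next_suitable_prime(fA, D, minpoly, domain, start=2):
    p = start
    while not _test_prime(fA, D, minpoly, p, domain):
        p = nextprime(p)
    return p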
# squarefree f with cont_x0(f) = 1
def _factor(f, save):
r"""
Factor a multivariate polynomial `f`, which is squarefree and primitive
in `x_0`, in `\mathbb Q(\alpha)[x_0, \ldots, x_n]`.
References
==========
* :cite:`Javadi2009factor`
"""
ring = f.ring # Q(alpha)[x_0, ..., x_{n-1}]
lcring = ring.drop(0)
uniring = ring.drop(*ring.gens[1:])
ground = ring.domain.domain
n = ring.ngens
z = Dummy('z')
qring = ring.clone(symbols=ring.symbols + (z,), domain=ground)
lcqring = qring.drop(0)
groundring = ground.ring
zring = qring.clone(domain=groundring)
lczring = zring.drop(0)
minpoly = _minpoly_from_dense(ring.domain.mod, zring.drop(*zring.gens[:-1]))
f_ = _monic_associate(f, zring)
if save is True:
D = minpoly.resultant(minpoly.diff(0))
else:
D = groundring.one
# heuristic bound for p-adic lift
B = (f_.max_norm() + 1)*D
lc = f_.eject(*zring.gens[1:]).LC
gamma, lcfactors = efactor(_z_to_alpha(lc, lcring)) # over QQ(alpha)[x_1, ..., x_n]
gamma = ground.convert(gamma)
D_ = gamma.denominator
gamma_ = gamma.numerator
lcfactors_ = []
for l, exp in lcfactors:
den, l_ = _alpha_to_z(l, lcqring).clear_denoms() # l_ in QQ[x_1, ..., x_n, z], but coeffs in ZZ
cont, l_ = l_.set_ring(lczring).primitive()
D_ *= den**exp
gamma_ *= cont**exp
lcfactors_.append((l_, exp))
f_ *= D_
p = 2
N = 0
history = set()
tries = 5 # how big should this be?
while True:
for _ in range(tries):
A = tuple(random.randint(-N, N) for _ in range(n - 1))
if A in history:
continue
history.add(A)
try:
result = _test_evaluation_points(f_, gamma_, lcfactors, A, D)
except UnluckyLeadingCoefficient:
# TODO: check interval
C = [random.randint(1, 3*(N + 1)) for _ in range(n - 1)]
gens = zring.gens
x = gens[0]
for i, ci in zip(range(1, n + 1), C):
xi = gens[i]
f_ = f_.compose(xi, x + xi*ci)
lc, factors = _factor(_z_to_alpha(f_, ring), save)
gens = factors[0].ring.gens
x = gens[0]
for i, ci in zip(range(1, n + 1), C):
xi = gens[i]
factors = [g.compose(xi, (xi - x).quo_ground(ci)) for g in factors]
return (lc, factors)
if result is None:
continue
fA, denoms, divisors = result
with using(aa_factor_method='trager'):
_, fAfactors = _z_to_alpha(fA, uniring).factor_list()
if len(fAfactors) == 1:
g = _z_to_alpha(f_, ring)
return (f.LC, [g.monic()])
result = _leading_coeffs(f_, fAfactors, gamma_, lcfactors_, A, D, denoms, divisors)
if result is None:
continue
f_, lcs, fAfactors_ = result
prod = groundring.one
for lc in lcs:
prod *= lc.LC
delta = (ground(prod, f_.LC)).numerator
f_ *= delta
while not _test_prime(fA, D, minpoly, p, zring.domain):
p = nextprime(p)
pfactors = _hensel_lift(f_, fAfactors_, lcs, A, minpoly, p)
if pfactors is None:
p = nextprime(p)
f_ = f_.primitive()[1]
continue
factors = _padic_lift(f_, pfactors, lcs, B, minpoly, p)
if factors is None:
p = nextprime(p)
f_ = f_.primitive()[1]
B *= B
continue
return (f.LC, [_z_to_alpha(g.primitive()[1], ring).monic() for g in factors])
N += 1
# output of the form (lc, [(poly1, exp1), ...])
def efactor(f, save=True):
r"""Factor a multivariate polynomial `f` in `\mathbb Q(\alpha)[x_0, \ldots, x_n]`.
By default, an estimate of the defect of the algebraic field is included
in all computations. If ``save`` is set to ``False``, the defect will be
treated as one, which makes the computations faster. However, if the defect of
`\alpha` is larger than one, this may lead to wrong results.
References
==========
* :cite:`Javadi2009factor`
"""
ring = f.ring
assert ring.domain.is_AlgebraicField
if f.is_ground:
return (f[1], [])
n = ring.ngens
if n == 1:
with using(aa_factor_method='trager'):
return f.factor_list()
else:
cont, f = f.eject(*ring.gens[1:]).primitive()
f = f.inject()
if cont != 1:
lccont, contfactors = efactor(cont)
lc, factors = efactor(f)
contfactors = [(g.set_ring(ring), exp) for g, exp in contfactors]
return (lccont * lc, _sort_factors(contfactors + factors))
# this is only correct because the content in x_0 is already divided out
lc, sqflist = f.sqf_list()
factors = []
for g, exp in sqflist:
lcg, gfactors = _factor(g, save)
lc *= lcg
factors = factors + [(gi, exp) for gi in gfactors]
return (lc, _sort_factors(factors))
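# Hedged usage sketch (not part of the original module): the intended calling
# convention of efactor and the (lc, [(factor, exp), ...]) return shape noted
# above.  The import path and ring constructor below are assumptions that
# mirror the SymPy/Diofant-style API this file appears to build on; they are
# illustrative only and never called.
def _example_efactor():
    from diofant import QQ, ring, sqrt          # assumed import path
    _, x, y = ring('x y', QQ.algebraic_field(sqrt(2)))
    f = x**2 - 2*y**2                           # splits over QQ(sqrt(2))
    lc, factors = efactor(f)
    return lc, factors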
|
|
"""
Provide a simple user-friendly API to Theano-managed memory.
"""
# Standard imports
from __future__ import absolute_import, print_function, division
import copy
import logging
# Third-party imports
import numpy
# Theano imports
from theano.gof import Container, Variable, generic, utils
_logger = logging.getLogger('theano.compile.sharedvalue')
__docformat__ = 'restructuredtext en'
class SharedVariable(Variable):
"""
Variable that is (defaults to being) shared between functions that
it appears in.
Parameters
----------
name : str
The name for this variable (see `Variable`).
    type : Type
The type for this variable (see `Variable`).
value
A value to associate with this variable (a new container will be
created).
strict
True : assignments to .value will not be cast or copied, so they must
have the correct type.
allow_downcast
Only applies if `strict` is False.
True : allow assigned value to lose precision when cast during
assignment.
False : never allow precision loss.
None : only allow downcasting of a Python float to a scalar floatX.
container
The container to use for this variable. Illegal to pass this as well as
a value.
Notes
-----
    For a more user-friendly constructor, see `shared`.
"""
# Container object
container = None
"""
A container to use for this SharedVariable when it is an implicit
function parameter.
:type: `Container`
"""
# default_update
# If this member is present, its value will be used as the "update" for
# this Variable, unless another update value has been passed to "function",
# or the "no_default_updates" list passed to "function" contains it.
def __init__(self, name, type, value, strict,
allow_downcast=None, container=None):
super(SharedVariable, self).__init__(type=type, name=name,
owner=None, index=None)
if container is not None:
self.container = container
if (value is not None) or (strict is not None):
raise TypeError('value and strict are ignored if you pass '
'a container here')
else:
self.container = Container(
self,
storage=[type.filter(value, strict=strict,
allow_downcast=allow_downcast)],
readonly=False,
strict=strict,
allow_downcast=allow_downcast)
def get_value(self, borrow=False, return_internal_type=False):
"""
Get the non-symbolic value associated with this SharedVariable.
Parameters
----------
borrow : bool
True to permit returning of an object aliased to internal memory.
return_internal_type : bool
True to permit the returning of an arbitrary type object used
internally to store the shared variable.
Only with borrow=False and return_internal_type=True does this function
guarantee that you actually get the internal object.
But in that case, you may get different return types when using
different compute devices.
"""
if borrow:
return self.container.value
else:
return copy.deepcopy(self.container.value)
def set_value(self, new_value, borrow=False):
"""
Set the non-symbolic value associated with this SharedVariable.
Parameters
----------
borrow : bool
True to use the new_value directly, potentially creating problems
related to aliased memory.
Changes to this value will be visible to all functions using
this SharedVariable.
"""
if borrow:
self.container.value = new_value
else:
self.container.value = copy.deepcopy(new_value)
def zero(self, borrow=False):
"""
Set the values of a shared variable to 0.
Parameters
----------
        borrow : bool
            True to modify the value of a shared variable directly by using
            its previous value. Potentially this can cause problems related
            to aliased memory.
Changes done with this function will be visible to all functions using
this SharedVariable.
"""
if borrow:
self.container.value[...] = 0
else:
self.container.value = 0 * self.container.value
def clone(self):
cp = self.__class__(
name=self.name,
type=self.type,
value=None,
strict=None,
container=self.container)
cp.tag = copy.copy(self.tag)
return cp
def __getitem__(self, *args):
# __getitem__ is not available for generic SharedVariable objects.
# We raise a TypeError like Python would do if __getitem__ was not
# implemented at all, but with a more explicit error message to help
# Theano users figure out the root of the problem more easily.
value = self.get_value(borrow=True)
if isinstance(value, numpy.ndarray):
# Array probably had an unknown dtype.
msg = ("a Numpy array with dtype: '%s'. This data type is not "
"currently recognized by Theano tensors: please cast "
"your data into a supported numeric type if you need "
"Theano tensor functionalities." % value.dtype)
else:
msg = ('an object of type: %s. Did you forget to cast it into '
'a Numpy array before calling theano.shared()?' %
type(value))
raise TypeError(
"The generic 'SharedVariable' object is not subscriptable. "
"This shared variable contains %s" % msg)
def _value_get(self):
raise Exception("sharedvar.value does not exist anymore. Use "
"sharedvar.get_value() or sharedvar.set_value()"
" instead.")
def _value_set(self, new_value):
raise Exception("sharedvar.value does not exist anymore. Use "
"sharedvar.get_value() or sharedvar.set_value()"
" instead.")
# We keep this just to raise an error
value = property(_value_get, _value_set)
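# Hedged illustration (not part of the original module): what the ``borrow``
# flag changes.  get_value(borrow=True) may hand back the internal storage
# itself, so in-place edits become visible to every function using the
# variable, while the default returns an independent deep copy.  The helper is
# illustrative and expects an already-constructed SharedVariable.
def _example_borrow_semantics(shared_var):
    aliased = shared_var.get_value(borrow=True)   # may alias internal memory
    copied = shared_var.get_value()               # always an independent copy
    return aliased is shared_var.container.value, copied is aliased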
def shared_constructor(ctor, remove=False):
if remove:
shared.constructors.remove(ctor)
else:
shared.constructors.append(ctor)
return ctor
def shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
"""Return a SharedVariable Variable, initialized with a copy or
reference of `value`.
    This function iterates over constructor functions to find a
    suitable SharedVariable subclass. The suitable one is the first
    constructor that accepts the given value. See the documentation of
    :func:`shared_constructor` for the definition of a constructor
    function.
This function is meant as a convenient default. If you want to use a
specific shared variable constructor, consider calling it directly.
``theano.shared`` is a shortcut to this function.
.. attribute:: constructors
A list of shared variable constructors that will be tried in reverse
order.
Notes
-----
By passing kwargs, you effectively limit the set of potential constructors
to those that can accept those kwargs.
    Some shared variable constructors accept ``borrow`` as an extra kwarg.
`See <http://deeplearning.net/software/theano/tutorial/aliasing.\
html#borrowing-when-creating-shared-variables>`_ for details.
    Some shared variable constructors accept ``broadcastable`` as an extra
    kwarg. Because shared variable shapes can change, all dimensions default
    to not being broadcastable, even if ``value`` has a shape of 1 along some
    dimension.
This parameter allows you to create for example a `row` or `column` 2d
tensor.
"""
try:
if isinstance(value, Variable):
raise TypeError("Shared variable constructor needs numeric "
"values and not symbolic variables.")
for ctor in reversed(shared.constructors):
try:
var = ctor(value, name=name, strict=strict,
allow_downcast=allow_downcast, **kwargs)
utils.add_tag_trace(var)
return var
except TypeError:
continue
# This may happen when kwargs were supplied
# if kwargs were given, the generic_constructor won't be callable.
#
# This was done on purpose, the rationale being that if kwargs
# were supplied, the user didn't want them to be ignored.
except MemoryError as e:
e.args = e.args + ('you might consider'
' using \'theano.shared(..., borrow=True)\'',)
raise
raise TypeError('No suitable SharedVariable constructor could be found.'
' Are you sure all kwargs are supported?'
' We do not support the parameter dtype or type.'
' value="%s". parameters="%s"' %
(value, kwargs))
shared.constructors = []
@shared_constructor
def generic_constructor(value, name=None, strict=False, allow_downcast=None):
"""
SharedVariable Constructor.
"""
return SharedVariable(type=generic, value=value, name=name, strict=strict,
allow_downcast=allow_downcast)
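# Hedged usage sketch (not part of the original module): the typical round
# trip through shared()/set_value()/get_value().  With only the generic
# constructor registered in this file the value is stored as-is; tensor-aware
# constructors are registered elsewhere in Theano.  Illustrative only.
def _example_shared_roundtrip():
    counter = shared(0, name='counter')
    counter.set_value(counter.get_value() + 1)
    return counter.get_value()   # 1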
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import unittest
import warnings
from six.moves import reload_module as reload
from mock import patch
from mock import MagicMock as Mock
import pyrax
import pyrax.exceptions as exc
import pyrax.utils as utils
from pyrax import fakes
class PyraxInitTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
reload(pyrax)
self.orig_connect_to_cloudservers = pyrax.connect_to_cloudservers
self.orig_connect_to_cloudfiles = pyrax.connect_to_cloudfiles
ctclb = pyrax.connect_to_cloud_loadbalancers
self.orig_connect_to_cloud_loadbalancers = ctclb
self.orig_connect_to_cloud_databases = pyrax.connect_to_cloud_databases
self.orig_get_service_endpoint = pyrax._get_service_endpoint
super(PyraxInitTest, self).__init__(*args, **kwargs)
self.username = "fakeuser"
self.password = "fakeapikey"
self.tenant_id = "faketenantid"
def setUp(self):
self.identity = fakes.FakeIdentity()
vers = pyrax.version.version
pyrax.settings._settings = {
"default": {
"auth_endpoint": "DEFAULT_AUTH",
"region": "DFW",
"encoding": "utf-8",
"http_debug": False,
"identity_class": pyrax.rax_identity.RaxIdentity,
"identity_type": "rax_identity.RaxIdentity",
"keyring_username": "fakeuser",
"tenant_id": None,
"tenant_name": None,
"user_agent": "pyrax/%s" % vers,
"use_servicenet": False,
"verify_ssl": False,
},
"alternate": {
"auth_endpoint": "ALT_AUTH",
"region": "NOWHERE",
"encoding": "utf-8",
"http_debug": False,
"identity_class": pyrax.keystone_identity.KeystoneIdentity,
"identity_type": "keystone_identity.KeystoneIdentity",
"keyring_username": "fakeuser",
"tenant_id": None,
"tenant_name": None,
"user_agent": "pyrax/%s" % vers,
"use_servicenet": False,
"verify_ssl": False,
}}
pyrax.identity = fakes.FakeIdentity()
pyrax.identity.authenticated = True
pyrax.connect_to_cloudservers = Mock()
pyrax.connect_to_cloudfiles = Mock()
pyrax.connect_to_cloud_loadbalancers = Mock()
pyrax.connect_to_cloud_databases = Mock()
pyrax._get_service_endpoint = Mock(return_value="http://example.com/")
pyrax.USER_AGENT = "DUMMY"
def tearDown(self):
pyrax.settings._settings = {}
pyrax.connect_to_cloudservers = self.orig_connect_to_cloudservers
pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
octclb = self.orig_connect_to_cloud_loadbalancers
pyrax.connect_to_cloud_loadbalancers = octclb
pyrax.connect_to_cloud_databases = self.orig_connect_to_cloud_databases
pyrax._get_service_endpoint = self.orig_get_service_endpoint
def test_require_auth(self):
@pyrax._require_auth
def testfunc():
pass
pyrax.identity.authenticated = True
testfunc()
pyrax.identity.authenticated = False
self.assertRaises(exc.NotAuthenticated, testfunc)
def test_import_identity(self):
sav = pyrax.utils.import_class
cls = utils.random_unicode()
pyrax.utils.import_class = Mock(return_value=cls)
ret = pyrax._import_identity(cls)
self.assertEqual(ret, cls)
pyrax.utils.import_class = sav
def test_import_identity_external(self):
sav = pyrax.utils.import_class
cls = utils.random_unicode()
def fake_import(nm):
if "pyrax.identity." in nm:
raise ImportError()
else:
return nm
pyrax.utils.import_class = fake_import
ret = pyrax._import_identity(cls)
self.assertEqual(ret, cls)
pyrax.utils.import_class = sav
def test_create_context(self):
sav = pyrax._create_identity
pyrax._create_identity = Mock()
id_type = utils.random_unicode()
username = utils.random_unicode()
password = utils.random_unicode()
tenant_id = utils.random_unicode()
tenant_name = utils.random_unicode()
api_key = utils.random_unicode()
verify_ssl = utils.random_unicode()
pyrax.create_context(id_type=id_type, username=username,
password=password, tenant_id=tenant_id,
tenant_name=tenant_name, api_key=api_key,
verify_ssl=verify_ssl)
pyrax._create_identity.assert_called_once_with(id_type=id_type,
username=username, password=password, tenant_id=tenant_id,
tenant_name=tenant_name, api_key=api_key,
verify_ssl=verify_ssl, return_context=True)
pyrax._create_identity = sav
def test_settings_get(self):
def_ep = pyrax.get_setting("auth_endpoint", "default")
alt_ep = pyrax.get_setting("auth_endpoint", "alternate")
self.assertEqual(def_ep, "DEFAULT_AUTH")
self.assertEqual(alt_ep, "ALT_AUTH")
def test_settings_get_from_env(self):
pyrax.settings._settings = {"default": {}}
pyrax.settings.env_dct = {"identity_type": "fake"}
typ = utils.random_unicode()
ident = utils.random_unicode()
sav_env = os.environ
sav_imp = pyrax._import_identity
pyrax._import_identity = Mock(return_value=ident)
os.environ = {"fake": typ}
ret = pyrax.get_setting("identity_class")
pyrax._import_identity = sav_imp
os.environ = sav_env
def test_settings_set_bad_env(self):
key = utils.random_unicode()
val = utils.random_unicode()
self.assertRaises(exc.EnvironmentNotFound, pyrax.settings.set, key,
val, "bad_env")
def test_settings_set_bad_key(self):
key = utils.random_unicode()
val = utils.random_unicode()
self.assertRaises(exc.InvalidSetting, pyrax.settings.set, key, val)
def test_settings_set_region(self):
key = "region"
val = utils.random_unicode()
pyrax.settings.set(key, val)
self.assertEqual(pyrax.get_setting(key), val)
def test_settings_set_region_no_identity(self):
key = "region"
val = utils.random_unicode()
sav = pyrax.identity
pyrax.identity = None
ret = pyrax.settings.set(key, val)
self.assertIsNone(ret)
pyrax.identity = sav
def test_settings_set_verify_ssl(self):
key = "verify_ssl"
val = utils.random_unicode()
pyrax.settings.set(key, val)
self.assertEqual(pyrax.get_setting(key), val)
def test_settings_set_verify_ssl_no_identity(self):
key = "verify_ssl"
val = utils.random_unicode()
sav = pyrax.identity
pyrax.identity = None
ret = pyrax.settings.set(key, val)
self.assertIsNone(ret)
pyrax.identity = sav
def test_read_config(self):
dummy_cfg = fakes.fake_config_file
sav_region = pyrax.default_region
sav_USER_AGENT = pyrax.USER_AGENT
with utils.SelfDeletingTempfile() as cfgfile:
with open(cfgfile, "w") as cfg:
cfg.write(dummy_cfg)
pyrax.settings.read_config(cfgfile)
self.assertEqual(pyrax.get_setting("region"), "FAKE")
self.assertTrue(pyrax.get_setting("user_agent").startswith("FAKE "))
pyrax.default_region = sav_region
pyrax.USER_AGENT = sav_USER_AGENT
def test_read_config_creds(self):
dummy_cfg = fakes.fake_config_file
sav_region = pyrax.default_region
sav_USER_AGENT = pyrax.USER_AGENT
with utils.SelfDeletingTempfile() as cfgfile:
with open(cfgfile, "w") as cfg:
cfg.write(dummy_cfg)
# Add password entry
cfg.write("password = fake\n")
with warnings.catch_warnings(record=True) as warn:
pyrax.settings.read_config(cfgfile)
self.assertEqual(len(warn), 1)
pyrax.default_region = sav_region
pyrax.USER_AGENT = sav_USER_AGENT
def test_read_config_bad(self):
sav_region = pyrax.default_region
dummy_cfg = fakes.fake_config_file
# Test invalid setting
dummy_cfg = dummy_cfg.replace("custom_user_agent", "fake")
sav_USER_AGENT = pyrax.USER_AGENT
with utils.SelfDeletingTempfile() as cfgfile:
with open(cfgfile, "w") as cfg:
cfg.write(dummy_cfg)
pyrax.settings.read_config(cfgfile)
self.assertEqual(pyrax.USER_AGENT, sav_USER_AGENT)
# Test bad file
with utils.SelfDeletingTempfile() as cfgfile:
with open(cfgfile, "w") as cfg:
cfg.write("FAKE")
self.assertRaises(exc.InvalidConfigurationFile,
pyrax.settings.read_config, cfgfile)
pyrax.default_region = sav_region
pyrax.USER_AGENT = sav_USER_AGENT
def test_set_credentials(self):
pyrax.set_credentials(self.username, self.password)
self.assertEqual(pyrax.identity.username, self.username)
self.assertEqual(pyrax.identity.password, self.password)
self.assertTrue(pyrax.identity.authenticated)
def test_set_bad_credentials(self):
self.assertRaises(exc.AuthenticationFailed, pyrax.set_credentials,
"bad", "creds")
self.assertFalse(pyrax.identity.authenticated)
def test_set_credential_file(self):
with utils.SelfDeletingTempfile() as tmpname:
with open(tmpname, "wb") as tmp:
tmp.write("[keystone]\n")
tmp.write("username = %s\n" % self.username)
tmp.write("password = %s\n" % self.password)
tmp.write("tenant_id = %s\n" % self.tenant_id)
pyrax.set_credential_file(tmpname)
self.assertEqual(pyrax.identity.username, self.username)
self.assertEqual(pyrax.identity.password, self.password)
self.assertTrue(pyrax.identity.authenticated)
def test_set_bad_credential_file(self):
with utils.SelfDeletingTempfile() as tmpname:
with open(tmpname, "wb") as tmp:
tmp.write("[keystone]\n")
tmp.write("username = bad\n")
tmp.write("password = creds\n")
tmp.write("tenant_id = stuff\n")
self.assertRaises(exc.AuthenticationFailed,
pyrax.set_credential_file, tmpname)
self.assertFalse(pyrax.identity.authenticated)
def test_keyring_auth_no_module(self):
pyrax.keyring = None
self.assertRaises(exc.KeyringModuleNotInstalled, pyrax.keyring_auth)
def test_keyring_auth_no_username(self):
pyrax.keyring = object()
set_obj = pyrax.settings
env = set_obj.environment
set_obj._settings[env]["keyring_username"] = ""
self.assertRaises(exc.KeyringUsernameMissing, pyrax.keyring_auth)
def test_keyring_auth(self):
class FakeKeyring(object):
pass
fake_keyring = FakeKeyring()
pyrax.keyring = fake_keyring
fake_keyring.get_password = Mock(return_value="fakeapikey")
pyrax.keyring_username = "fakeuser"
pyrax.keyring_auth()
self.assertTrue(pyrax.identity.authenticated)
def test_auth_with_token(self):
pyrax.authenticated = False
tok = utils.random_unicode()
tname = utils.random_unicode()
pyrax.auth_with_token(tok, tenant_name=tname)
self.assertTrue(pyrax.identity.authenticated)
self.assertEqual(pyrax.identity.token, tok)
self.assertEqual(pyrax.identity.tenant_name, tname)
def test_clear_credentials(self):
pyrax.set_credentials(self.username, self.password)
# These next lines are required to test that clear_credentials
# actually resets them to None.
pyrax.cloudservers = object()
pyrax.cloudfiles = object()
pyrax.cloud_loadbalancers = object()
pyrax.cloud_databases = object()
default_region = object()
self.assertTrue(pyrax.identity.authenticated)
self.assertIsNotNone(pyrax.cloudfiles)
pyrax.clear_credentials()
self.assertIsNone(pyrax.identity)
self.assertIsNone(pyrax.cloudservers)
self.assertIsNone(pyrax.cloudfiles)
self.assertIsNone(pyrax.cloud_loadbalancers)
self.assertIsNone(pyrax.cloud_databases)
def test_get_environment(self):
env = pyrax.get_environment()
all_envs = pyrax.list_environments()
self.assertTrue(env in all_envs)
def test_set_environment(self):
env = "alternate"
sav = pyrax.authenticate
pyrax.authenticate = Mock()
pyrax.set_environment(env)
self.assertEqual(pyrax.get_environment(), env)
pyrax.authenticate = sav
def test_set_environment_fail(self):
sav = pyrax.authenticate
pyrax.authenticate = Mock()
env = "doesn't exist"
self.assertRaises(exc.EnvironmentNotFound, pyrax.set_environment, env)
pyrax.authenticate = sav
def test_set_default_region(self):
orig_region = pyrax.default_region
new_region = "test"
pyrax.set_default_region(new_region)
self.assertEqual(pyrax.default_region, new_region)
pyrax.default_region = orig_region
def test_set_identity_type_setting(self):
savtyp = pyrax.get_setting("identity_type")
savcls = pyrax.get_setting("identity_class")
pyrax.set_setting("identity_class", None)
pyrax.set_setting("identity_type", "keystone")
cls = pyrax.get_setting("identity_class")
self.assertEqual(cls, pyrax.keystone_identity.KeystoneIdentity)
pyrax.set_setting("identity_type", savtyp)
pyrax.set_setting("identity_class", savcls)
def test_set_region_setting(self):
ident = pyrax.identity
ident.region = "DFW"
pyrax.set_setting("region", "ORD")
self.assertEqual(ident.region, "DFW")
pyrax.set_setting("region", "LON")
self.assertEqual(ident.region, "LON")
def test_safe_region(self):
# Pass direct
reg = utils.random_unicode()
ret = pyrax._safe_region(reg)
self.assertEqual(reg, ret)
# From config setting
orig_reg = pyrax.get_setting("region")
reg = utils.random_unicode()
pyrax.set_setting("region", reg)
ret = pyrax._safe_region()
self.assertEqual(reg, ret)
# Identity default
pyrax.set_setting("region", None)
orig_defreg = pyrax.identity.get_default_region
reg = utils.random_unicode()
pyrax.identity.get_default_region = Mock(return_value=reg)
ret = pyrax._safe_region()
self.assertEqual(reg, ret)
pyrax.identity.get_default_region = orig_defreg
pyrax.set_setting("region", orig_reg)
def test_safe_region_no_context(self):
reg = None
sav_ident = pyrax.identity
sav_create = pyrax._create_identity
def set_ident():
pyrax.identity = sav_ident
pyrax._create_identity = Mock(side_effect=set_ident)
sav_get = pyrax.settings.get
pyrax.settings.get = Mock(return_value=None)
pyrax.identity = None
ret = pyrax._safe_region(reg)
self.assertIsNotNone(ret)
pyrax._create_identity = sav_create
pyrax.identity = sav_ident
pyrax.settings.get = sav_get
def test_make_agent_name(self):
test_agent = "TEST"
ret = pyrax._make_agent_name(test_agent)
self.assertTrue(ret.endswith(test_agent))
self.assertTrue(ret.startswith(pyrax.USER_AGENT))
def test_connect_to_services(self):
pyrax.connect_to_services()
pyrax.connect_to_cloudservers.assert_called_once_with(region=None)
pyrax.connect_to_cloudfiles.assert_called_once_with(region=None)
pyrax.connect_to_cloud_loadbalancers.assert_called_once_with(
region=None)
pyrax.connect_to_cloud_databases.assert_called_once_with(region=None)
@patch('pyrax.nc.Client', new=fakes.FakeCSClient)
def test_connect_to_cloudservers(self):
pyrax.cloudservers = None
sav = pyrax.connect_to_cloudservers
pyrax.connect_to_cloudservers = self.orig_connect_to_cloudservers
pyrax.cloudservers = pyrax.connect_to_cloudservers()
self.assertIsNotNone(pyrax.cloudservers)
pyrax.connect_to_cloudservers = sav
@patch('pyrax.StorageClient', new=fakes.FakeService)
def test_connect_to_cloudfiles(self):
pyrax.cloudfiles = None
pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
pyrax.cloudfiles = pyrax.connect_to_cloudfiles(self.identity)
self.assertIsNotNone(pyrax.cloudfiles)
def test_connect_to_cloudfiles_ServiceNet(self):
orig = pyrax.get_setting("use_servicenet")
pyrax.set_setting("use_servicenet", True)
pyrax.cloudfiles = None
pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
sav = pyrax._create_client
pyrax._create_client = Mock()
cf = pyrax.connect_to_cloudfiles(public=False)
pyrax._create_client.assert_called_once_with(ep_name="object_store",
region=None, public=False)
pyrax.set_setting("use_servicenet", orig)
pyrax._create_client = sav
@patch('pyrax.CloudLoadBalancerClient', new=fakes.FakeService)
def test_connect_to_cloud_loadbalancers(self):
pyrax.cloud_loadbalancers = None
octclb = self.orig_connect_to_cloud_loadbalancers
pyrax.connect_to_cloud_loadbalancers = octclb
pyrax.cloud_loadbalancers = pyrax.connect_to_cloud_loadbalancers()
self.assertIsNotNone(pyrax.cloud_loadbalancers)
@patch('pyrax.CloudDatabaseClient', new=fakes.FakeService)
def test_connect_to_cloud_databases(self):
pyrax.cloud_databases = None
pyrax.connect_to_cloud_databases = self.orig_connect_to_cloud_databases
pyrax.cloud_databases = pyrax.connect_to_cloud_databases()
self.assertIsNotNone(pyrax.cloud_databases)
def test_set_http_debug(self):
pyrax.cloudservers = None
sav = pyrax.connect_to_cloudservers
pyrax.connect_to_cloudservers = self.orig_connect_to_cloudservers
pyrax.cloudservers = pyrax.connect_to_cloudservers()
pyrax.cloudservers.http_log_debug = False
pyrax.set_http_debug(True)
self.assertTrue(pyrax.cloudservers.http_log_debug)
pyrax.set_http_debug(False)
self.assertFalse(pyrax.cloudservers.http_log_debug)
pyrax.connect_to_cloudservers = sav
def test_get_encoding(self):
sav = pyrax.get_setting
pyrax.get_setting = Mock(return_value=None)
enc = pyrax.get_encoding()
self.assertEqual(enc, pyrax.default_encoding)
pyrax.get_setting = sav
def test_import_fail(self):
import __builtin__
sav_import = __builtin__.__import__
def fake_import(nm, *args):
if nm == "identity":
raise ImportError
else:
return sav_import(nm, *args)
__builtin__.__import__ = fake_import
self.assertRaises(ImportError, reload, pyrax)
__builtin__.__import__ = sav_import
reload(pyrax)
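# Hedged sketch (not part of the original tests): the save/replace/restore
# pattern used repeatedly above, written once as a context manager for
# illustration.  It is hypothetical and not used by the test cases.
import contextlib

@contextlib.contextmanager
def _swapped_attr(obj, name, replacement):
    saved = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield replacement
    finally:
        setattr(obj, name, saved)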
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.rebol
~~~~~~~~~~~~~~~~~~~~~
Lexers for the REBOL and related languages.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Whitespace
__all__ = ['RebolLexer', 'RedLexer']
class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
.. versionadded:: 1.1
"""
name = 'REBOL'
aliases = ['rebol']
filenames = ['*.r', '*.r3', '*.reb']
mimetypes = ['text/x-rebol']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(
r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
r'while|compress|decompress|secure|open|close|read|read-io|'
r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
r'browse|launch|stats|get-modes|set-modes|to-local-file|'
r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
r'hide|draw|show|size-text|textinfo|offset-to-caret|'
r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
r'rsa-encrypt)$', word):
yield match.start(), Name.Builtin, word
elif re.match(
r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
r'minimum|maximum|negate|complement|absolute|random|head|tail|'
r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
r'copy)$', word):
yield match.start(), Name.Function, word
elif re.match(
r'(error|source|input|license|help|install|echo|Usage|with|func|'
r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
r'write-user|save-user|set-user-name|protect-system|parse-xml|'
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
r'request-dir|center-face|do-events|net-error|decode-url|'
r'parse-header|parse-header-date|parse-email-addrs|import-email|'
r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
r'find-key-face|do-face|viewtop|confine|find-window|'
r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
r'read-thru|load-thru|do-thru|launch-thru|load-image|'
r'request-download|do-face-alt|set-font|set-para|get-style|'
r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
r'resize-face|load-stock|load-stock-block|notify|request|flash|'
r'request-color|request-pass|request-text|request-list|'
r'request-date|request-file|dbug|editor|link-relative-path|'
r'emailer|parse-error)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
r'return|exit|break)$', word):
yield match.start(), Name.Exception, word
elif re.match('REBOL$', word):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
word):
yield match.start(), Operator, word
elif re.match(r".*\?$", word):
yield match.start(), Keyword, word
elif re.match(r".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'REBOL\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{")\s/[\]]*', Name.Attribute),
(r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
def analyse_text(text):
"""
        Check if the code contains a REBOL header, in which case it is
        probably not R code.
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5
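# Hedged usage sketch (not part of the original module): driving the lexer
# through the standard pygments.highlight() API.  The sample source string is
# purely illustrative; NullFormatter simply re-emits the token text.
def _example_highlight_rebol(code='REBOL [Title: "demo"]\nprint "hi"\n'):
    from pygments import highlight
    from pygments.formatters import NullFormatter
    return highlight(code, RebolLexer(), NullFormatter())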
class RedLexer(RegexLexer):
"""
A `Red-language <http://www.red-lang.org/>`_ lexer.
.. versionadded:: 2.0
"""
name = 'Red'
aliases = ['red', 'red/system']
filenames = ['*.red', '*.reds']
mimetypes = ['text/x-red', 'text/x-red-system']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
r'foreach|forall|func|function|does|has|switch|'
r'case|reduce|compose|get|set|print|prin|equal\?|'
r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
r'greater-or-equal\?|same\?|not|type\?|stats|'
r'bind|union|replace|charset|routine)$', word):
yield match.start(), Name.Builtin, word
elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
r'update|write)$', word):
yield match.start(), Name.Function, word
elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
r'none|crlf|dot|null-byte)$', word):
yield match.start(), Name.Builtin.Pseudo, word
elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
r'#switch|#default|#get-definition)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
r'quote|forever)$', word):
yield match.start(), Name.Exception, word
elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
r'any-struct\?|none\?|word\?|any-series\?)$', word):
yield match.start(), Keyword, word
elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
r'<<<|>>>|<<|>>|<|>%)$', word):
yield match.start(), Operator, word
elif re.match(r".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
elif re.match(":.*", word):
yield match.start(), Generic.Subheading, word # get-word
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'Red/System\s+\[', Generic.Strong, 'script'),
(r'Red\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f\s]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
bygroups(Number.Hex, Name.Variable, Whitespace)),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{^")\s/[\]]*', Name.Attribute),
(r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
|
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides various utilities around services."""
from itertools import chain
from random import choice
from sqlalchemy.orm import undefer
from sqlalchemy.orm.attributes import set_committed_value
from sqlalchemy.orm.session import object_session
from sqlalchemy.sql import or_
from aquilon.exceptions_ import ArgumentError, InternalError
from aquilon.aqdb.model import (Host, Cluster, Service, ServiceInstance,
MetaCluster, EsxCluster, Archetype, Personality)
from aquilon.worker.templates import (Plenary, PlenaryCollection,
PlenaryServiceInstanceServer)
class Chooser(object):
"""Helper for choosing services for an object."""
def __new__(cls, dbobj, *args, **kwargs):
if isinstance(dbobj, Host):
chooser = super(Chooser, HostChooser).__new__(HostChooser)
elif isinstance(dbobj, Cluster):
chooser = super(Chooser, ClusterChooser).__new__(ClusterChooser)
else:
# Just assume the consumer invoked the right subclass...
chooser = super(Chooser, cls).__new__(cls)
# Lock the owner in the DB to avoid problems with parallel runs
dbobj.lock_row()
return chooser
# Technically apply_changes is a method, but whatever...
abstract_fields = ["description", "archetype", "personality", "location",
"required_services", "original_service_instances",
"apply_changes"]
def __init__(self, dbobj, logger, required_only=False):
"""Initialize the chooser.
To clear out bindings that are not required, pass in
required_only=True.
Several staging areas and caches are set up within this object.
The general flow is that potential service instance choices
are kept in staging_services (dictionary of service to list of
service instances) and finalized into chosen_services (dictionary
of service to single service instance).
The original state of the object is held in the cache
original_service_instances (dictionary of service to single service
instance).
The instances_bound and instances_unbound lists are populated
after chosen_services with the differences between chosen_services
and original_service_instances.
Subclasses should call this before starting their own
initialization.
"""
self.dbobj = dbobj
self.session = object_session(dbobj)
self.required_only = required_only
self.logger = logger
self.description = self.generate_description()
self.logger.debug("Creating service Chooser for %s", self.description)
# Cache of the service maps
self.mapped_services = {}
# Stores interim service instance lists
self.staging_services = {}
# Report as many errors as possible in one shot
self.errors = []
# Cache the servers backing service instances
self.servers = {}
# Set of service instances with a new client
self.instances_bound = set()
# Set of service instances losing a client
self.instances_unbound = set()
# Track the chosen services
self.chosen_services = {}
# Keep stashed plenaries for rollback purposes
self.plenaries = PlenaryCollection(logger=self.logger)
def generate_description(self):
return str(self.dbobj)
def verify_init(self):
"""This is more of a verify-and-finalize method..."""
for field in self.abstract_fields:
if not hasattr(self, field):
raise InternalError("%s provides no %s field" %
(type(self.dbobj), field))
# This can be tweaked...
if not self.required_only:
for (service, instance) in self.original_service_instances.items():
self.staging_services[service] = [instance]
def error(self, msg, *args, **kwargs):
"""Errors are consolidated so that many can be reported at once."""
formatted = msg % args
self.errors.append(formatted)
self.logger.info(msg, *args, **kwargs)
def set_required(self):
"""Main entry point when setting the required services for a host."""
self.verify_init()
self.prestash_primary()
self.logger.debug("Setting required services")
self.cache_service_maps(self.required_services)
for dbservice in self.required_services:
self.find_service_instances(dbservice)
self.check_errors()
for dbservice in self.required_services:
self.choose_cluster_aligned(dbservice)
self.choose_available_capacity(dbservice)
self.choose_past_use(dbservice)
self.check_errors()
# If this code needs to be made more efficient, this could
# be refactored. We don't always need count_servers()...
# In theory don't always need the loop above, either.
self.count_servers()
for dbservice in self.required_services:
self.reduce_service_instances(dbservice)
self.finalize_service_instances()
self.analyze_changes()
self.stash_services()
self.apply_changes()
self.check_errors()
def set_single(self, service, instance=None, force=False):
"""Use this to update a single service.
If planning to use this method, construct the Chooser with
required_only=False. If required_only is True, all other
bindings will be cleared.
"""
self.verify_init()
self.prestash_primary()
if instance:
self.logger.debug("Setting service %s instance %s",
service.name, instance.name)
self.staging_services[service] = [instance]
else:
self.logger.debug("Setting service %s with auto-bind",
service.name)
self.staging_services[service] = None
self.cache_service_maps([service])
self.find_service_instances(service)
self.check_errors()
self.choose_cluster_aligned(service)
self.choose_available_capacity(service)
self.check_errors()
self.choose_past_use(service)
# If this code needs to be made more efficient, this could
# be refactored. We don't always need count_servers()...
self.count_servers()
self.reduce_service_instances(service)
self.finalize_service_instances()
self.analyze_changes()
if not force and self.instances_bound and self.instances_unbound:
cfg_path = list(self.instances_unbound)[0].cfg_path
self.error("%s is already bound to %s, use unbind "
"to clear first or rebind to force." %
(self.description, cfg_path))
self.check_errors()
self.stash_services()
self.apply_changes()
self.check_errors()
def cache_service_maps(self, dbservices):
self.service_maps = ServiceInstance.get_mapped_instance_cache(
self.personality, self.location, dbservices, self.network)
def find_service_instances(self, dbservice):
"""This finds the "closest" service instances, based on the known maps.
It expects that cache_service_maps has been run.
"""
instances = self.service_maps.get(dbservice, [])
if len(instances) >= 1:
for instance in instances:
self.logger.debug("Found service %s instance %s "
"in the maps.",
instance.service.name, instance.name)
self.staging_services[dbservice] = instances
return
self.error("Could not find a relevant service map for service %s "
"on %s", dbservice.name, self.description)
def check_errors(self):
if self.errors:
raise ArgumentError("\n".join(self.errors))
def choose_cluster_aligned(self, dbservice):
# Only implemented for hosts.
pass
def get_footprint(self, instance):
return 1
def instance_full(self, instance, max_clients, current_clients):
"""Check if the instance is effectively full.
This check is complicated because clusters have a larger impact
than a single host does.
"""
if max_clients is None:
return False
if instance == self.original_service_instances.get(instance.service):
if current_clients > max_clients:
return True
return False
return current_clients + self.get_footprint(instance) > max_clients
def choose_available_capacity(self, dbservice):
"""Verify that the available instances have spare capacity.
Error out if none should be used.
"""
maxed_out_instances = set()
for instance in self.staging_services[dbservice][:]:
max_clients = instance.enforced_max_clients
current_clients = instance.client_count
if self.instance_full(instance, max_clients, current_clients):
self.staging_services[dbservice].remove(instance)
maxed_out_instances.add(instance)
self.logger.debug("Rejected service %s instance %s with "
"max_client value of %s since client_count "
"is %s.",
instance.service.name, instance.name,
max_clients, current_clients)
if len(self.staging_services[dbservice]) < 1:
self.error("The available instances %s for service %s are "
"at full capacity.",
[str(instance.name)
for instance in maxed_out_instances],
dbservice.name)
return
def choose_past_use(self, dbservice):
"""If more than one service instance was found in the maps,
this method checks to see if we can reduce the list to a single
choice by checking to see if any of the instances was already in use.
"""
if len(self.staging_services[dbservice]) > 1 and \
self.original_service_instances.get(dbservice, None) and \
self.original_service_instances[dbservice] in \
self.staging_services[dbservice]:
self.logger.debug("Chose service %s instance %s because "
"of past use.",
dbservice.name,
self.original_service_instances[dbservice])
self.staging_services[dbservice] = [
self.original_service_instances[dbservice]]
return
def count_servers(self, dbservice=None):
"""Get a count of the number of times a server backs
service instances in use by this host.
This method is called both to initialize the count and to update
it as service instances are locked in.
"""
if dbservice:
instance_lists = [self.staging_services[dbservice]]
else:
instance_lists = self.staging_services.values()
for instances in instance_lists:
if len(instances) > 1:
# Ignore any services where an instance has not been chosen.
continue
for host in instances[0].server_hosts:
if self.servers.get(host, None):
self.servers[host] += 1
else:
self.servers[host] = 1
def reduce_service_instances(self, dbservice):
if len(self.staging_services[dbservice]) == 1:
self.count_servers(dbservice)
return
self.choose_affinity(dbservice)
if len(self.staging_services[dbservice]) == 1:
self.count_servers(dbservice)
return
self.choose_least_loaded(dbservice)
if len(self.staging_services[dbservice]) == 1:
self.count_servers(dbservice)
return
self.choose_random(dbservice)
self.count_servers(dbservice)
return
def choose_affinity(self, dbservice):
"""Attempt to choose a service based on server affinity,
also known as stickiness.
This could be extremely complicated when trying to deal with
instances backed by multiple servers. Starting simple.
        Count the number of servers backing this instance that
        back other instances used by the client. Any instance that does
        not have the largest count gets tossed.
"""
max_servers = 0
max_instances = None
for instance in self.staging_services[dbservice]:
common_servers = []
self.logger.debug("Checking service %s instance %s servers %s",
instance.service.name, instance.name,
[host.fqdn for host in instance.server_hosts])
for host in instance.server_hosts:
if self.servers.get(host, None):
common_servers.append(host)
if not common_servers:
continue
if len(common_servers) > max_servers:
max_servers = len(common_servers)
max_instances = [instance]
elif len(common_servers) == max_servers:
max_instances.append(instance)
if max_instances and \
len(max_instances) < len(self.staging_services[dbservice]):
for instance in self.staging_services[dbservice]:
if instance not in max_instances:
self.logger.debug("Discounted service %s instance %s "
"due to server affinity (stickiness).",
instance.service.name, instance.name)
self.staging_services[dbservice] = max_instances
def choose_least_loaded(self, dbservice):
"""Choose a service instance based on load."""
least_clients = None
least_loaded = []
for instance in self.staging_services[dbservice]:
client_count = instance.client_count
if not least_loaded or client_count < least_clients:
least_clients = client_count
least_loaded = [instance]
elif client_count == least_clients:
least_loaded.append(instance)
if len(least_loaded) < len(self.staging_services[dbservice]):
for instance in self.staging_services[dbservice]:
if instance not in least_loaded:
self.logger.debug("Discounted service %s instance %s "
"due to load.",
instance.service.name, instance.name)
self.staging_services[dbservice] = least_loaded
def choose_random(self, dbservice):
"""Pick a service instance randomly."""
self.staging_services[dbservice] = [
choice(self.staging_services[dbservice])]
self.logger.debug("Randomly chose service %s instance %s "
"from remaining choices.",
dbservice.name,
self.staging_services[dbservice][0].name)
def finalize_service_instances(self):
"""Fill out the list of chosen services."""
for (service, instances) in self.staging_services.items():
if len(instances) < 1: # pragma: no cover
self.error("Internal Error: Attempt to finalize on "
"service %s without any candidates." %
service.name)
continue
if len(instances) > 1: # pragma: no cover
self.error("Internal Error: Attempt to finalize on "
"service %s with too many candidates %s." %
(service.name,
["service %s instance %s" %
(instance.service.name, instance.name)
for instance in instances]))
self.chosen_services[service] = instances[0]
def analyze_changes(self):
"""Determine what changed."""
for (service, instance) in self.chosen_services.items():
if not self.original_service_instances.get(service, None) or \
self.original_service_instances[service] != instance:
self.instances_bound.add(instance)
for (service, instance) in self.original_service_instances.items():
if not self.chosen_services.get(service, None) or \
self.chosen_services[service] != instance:
self.instances_unbound.add(instance)
def stash_services(self):
for instance in self.instances_bound.union(self.instances_unbound):
plenary = PlenaryServiceInstanceServer(instance, logger=self.logger)
plenary.stash()
self.plenaries.append(plenary)
def flush_changes(self):
self.session.flush()
def get_write_key(self):
return self.plenaries.get_write_key()
def write_plenary_templates(self, locked=False):
self.plenaries.write(locked=locked)
def prestash_primary(self):
pass
def restore_stash(self):
self.plenaries.restore_stash()
def changed_server_fqdns(self):
hosts = set()
for instance in chain(self.instances_bound, self.instances_unbound):
for srv in instance.servers:
# Skip servers that do not have a profile
if not srv.host.personality.archetype.is_compileable:
continue
if (srv.host.branch == self.dbobj.branch and
srv.host.sandbox_author_id == self.dbobj.sandbox_author_id):
hosts.add(str(srv.host.fqdn))
return hosts
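# --- Illustrative sketch (not part of Aquilon) --------------------------------
# A minimal, dependency-free rendering of the winnowing strategy implemented by
# reduce_service_instances/choose_affinity/choose_least_loaded above: candidates
# sharing the most already-used servers survive, ties are broken by client load,
# and any remaining tie is broken at random. The plain-tuple data shapes and the
# function name are assumptions made purely for illustration.
def _reduce_candidates_sketch(candidates, used_server_counts, pick_random):
    """Reduce a non-empty ``candidates`` list to a single choice.

    candidates: list of (name, server_hosts, client_count) tuples.
    used_server_counts: dict mapping a server host to the number of already
        chosen instances it backs (cf. the self.servers map above).
    pick_random: callable picking one element from a list, e.g. random.choice.
    """
    def common(cand):
        # Affinity score: backing servers that already back chosen instances.
        return sum(1 for host in cand[1] if used_server_counts.get(host))

    best = max(common(cand) for cand in candidates)
    if best:
        candidates = [cand for cand in candidates if common(cand) == best]
    if len(candidates) == 1:
        return candidates[0]
    least = min(cand[2] for cand in candidates)
    candidates = [cand for cand in candidates if cand[2] == least]
    if len(candidates) == 1:
        return candidates[0]
    return pick_random(candidates)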
class HostChooser(Chooser):
"""Choose services for a host."""
def __init__(self, dbobj, *args, **kwargs):
"""Provide initialization specific for host bindings."""
if not isinstance(dbobj, Host):
raise InternalError("HostChooser can only choose services for "
"hosts, got %r (%s)" % (dbobj, type(dbobj)))
self.dbhost = dbobj
Chooser.__init__(self, dbobj, *args, **kwargs)
self.location = self.dbhost.machine.location
self.archetype = self.dbhost.archetype
self.personality = self.dbhost.personality
# If the primary name is a ReservedName, then it does not have a network
# attribute
if hasattr(self.dbhost.machine.primary_name, 'network'):
self.network = self.dbhost.machine.primary_name.network
else:
self.network = None
# All of them would come from
# self.dbhost.machine.interfaces[x].assignments[y].network, but that
# lookup should be optimized.
"""Stores interim service instance lists."""
q = self.session.query(Service)
q = q.outerjoin(Service.archetypes)
q = q.reset_joinpoint()
q = q.outerjoin(Service.personalities)
q = q.filter(or_(Archetype.id == self.archetype.id,
Personality.id == self.personality.id))
self.required_services = set(q.all())
self.original_service_instances = {}
"""Cache of any already bound services (keys) and the instance
that was bound (values).
"""
q = self.session.query(ServiceInstance)
q = q.options(undefer('_client_count'))
q = q.filter(ServiceInstance.clients.contains(self.dbhost))
set_committed_value(self.dbhost, 'services_used', q.all())
for si in self.dbhost.services_used:
self.original_service_instances[si.service] = si
self.logger.debug("%s original binding: %s",
self.description, si.cfg_path)
self.cluster_aligned_services = {}
if self.dbhost.cluster:
# Note that cluster services are currently ignored unless
# they are otherwise required by the archetype/personality.
for si in self.dbhost.cluster.service_bindings:
self.cluster_aligned_services[si.service] = si
for service in self.dbhost.cluster.required_services:
if service not in self.cluster_aligned_services:
# Don't just error here because the error() call
# has not yet been set up. Will error out later.
self.cluster_aligned_services[service] = None
# Went back and forth on this... deciding not to force
# an aligned service as required. This should give
# flexibility for multiple services to be aligned for
# a cluster type without being forced on all the
# personalities.
#self.required_services.add(item.service)
if self.dbhost.cluster.metacluster:
mc = self.dbhost.cluster.metacluster
for si in mc.service_bindings:
if si.service in self.cluster_aligned_services:
cas = self.cluster_aligned_services[si.service]
if cas is None:
# Error out later.
continue
self.logger.client_info(
"Replacing {0.name} instance with {1.name} "
"(bound to {2:l}) for service {3.name}".format(
cas, si, mc, si.service))
self.cluster_aligned_services[si.service] = si
for service in mc.required_services:
if service not in self.cluster_aligned_services:
# Don't just error here because the error() call
# has not yet been set up. Will error out later.
self.cluster_aligned_services[service] = None
def generate_description(self):
return format(self.dbhost)
def choose_cluster_aligned(self, dbservice):
if dbservice not in self.cluster_aligned_services:
return
if not self.cluster_aligned_services[dbservice]:
self.error("No instance set for %s aligned service %s."
" Please run `make cluster --cluster %s` to resolve.",
format(self.dbhost.cluster),
dbservice.name,
self.dbhost.cluster.name)
return
# This check is necessary to prevent bind_client from overriding
# the cluster's binding. The error message will be misleading...
if self.cluster_aligned_services[dbservice] not in \
self.staging_services[dbservice]:
self.error("{0} is set to use {1:l}, but that instance is not in a "
"service map for {2}.".format(self.dbhost.cluster,
self.cluster_aligned_services[dbservice],
self.dbhost.fqdn))
return
self.logger.debug("Chose service %s instance %s because it is cluster "
"aligned.",
dbservice.name,
self.cluster_aligned_services[dbservice].name)
self.staging_services[dbservice] = [
self.cluster_aligned_services[dbservice]]
return
def apply_changes(self):
"""Update the host object with pending changes."""
for instance in self.instances_bound:
self.logger.client_info("%s adding binding for service %s "
"instance %s",
self.description,
instance.service.name, instance.name)
self.dbhost.services_used.append(instance)
for instance in self.instances_unbound:
self.logger.client_info("%s removing binding for "
"service %s instance %s",
self.description,
instance.service.name, instance.name)
self.dbhost.services_used.remove(instance)
def prestash_primary(self):
plenary_host = Plenary.get_plenary(self.dbhost, logger=self.logger)
plenary_host.stash()
self.plenaries.append(plenary_host)
# This may be too much action at a distance... however, if
# we are potentially re-writing a host plenary, it seems like
# a good idea to also verify known dependencies.
plenary_machine = Plenary.get_plenary(self.dbhost.machine,
logger=self.logger)
plenary_machine.stash()
self.plenaries.append(plenary_machine)
if self.dbhost.resholder:
for dbres in self.dbhost.resholder.resources:
resource_plenary = Plenary.get_plenary(dbres, logger=self.logger)
resource_plenary.stash()
self.plenaries.append(resource_plenary)
class ClusterChooser(Chooser):
"""Choose services for a cluster."""
def __init__(self, dbobj, *args, **kwargs):
"""Provide initialization specific for cluster bindings."""
if not isinstance(dbobj, Cluster):
raise InternalError("ClusterChooser can only choose services for "
"clusters, got %r (%s)" % (dbobj, type(dbobj)))
self.dbcluster = dbobj
Chooser.__init__(self, dbobj, *args, **kwargs)
self.location = self.dbcluster.location_constraint
self.archetype = self.dbcluster.personality.archetype
self.personality = self.dbcluster.personality
self.required_services = set()
# TODO Should be calculated from the member hosts' network membership.
self.network = None
"""Stores interim service instance lists."""
for service in self.archetype.services:
self.required_services.add(service)
for service in self.personality.services:
self.required_services.add(service)
self.original_service_instances = {}
"""Cache of any already bound services (keys) and the instance
that was bound (values).
"""
for si in self.dbcluster.service_bindings:
self.original_service_instances[si.service] = si
self.logger.debug("%s original binding: %s",
self.description, si.cfg_path)
def generate_description(self):
return format(self.dbcluster)
def get_footprint(self, instance):
"""If this cluster is bound to a service, how many hosts bind?"""
if self.dbcluster.personality in instance.service.personalities or \
self.dbcluster.personality.archetype in instance.service.archetypes:
if self.dbcluster.cluster_type == 'meta':
return 0
return self.dbcluster.max_hosts
return 0
def apply_changes(self):
"""Update the cluster object with pending changes."""
for instance in self.instances_unbound:
self.logger.client_info("%s removing binding for "
"service %s instance %s",
self.description,
instance.service.name, instance.name)
if instance in self.dbcluster.service_bindings:
self.dbcluster.service_bindings.remove(instance)
else:
self.error("Internal Error: Could not unbind "
"service %s instance %s" %
(instance.service.name, instance.name))
for instance in self.instances_bound:
self.logger.client_info("%s adding binding for "
"service %s instance %s",
self.description,
instance.service.name, instance.name)
self.dbcluster.service_bindings.append(instance)
self.flush_changes()
for h in self.dbcluster.hosts:
host_chooser = Chooser(h, logger=self.logger,
required_only=False)
host_chooser.set_single(instance.service, instance, force=True)
host_chooser.flush_changes()
# Note, host plenary will be written later.
def prestash_primary(self):
def add_cluster_data(cluster, logger):
for dbhost in cluster.hosts:
host_plenary = Plenary.get_plenary(dbhost, logger=self.logger)
host_plenary.stash()
self.plenaries.append(host_plenary)
if cluster.resholder:
for dbres in cluster.resholder.resources:
resource_plenary = Plenary.get_plenary(dbres, logger=self.logger)
resource_plenary.stash()
self.plenaries.append(resource_plenary)
if isinstance(cluster, EsxCluster) and cluster.switch:
sw_plenary = Plenary.get_plenary(cluster.switch, logger=self.logger)
sw_plenary.stash()
self.plenaries.append(sw_plenary)
plenary_cluster = Plenary.get_plenary(self.dbcluster, logger=self.logger)
plenary_cluster.stash()
self.plenaries.append(plenary_cluster)
if self.dbcluster.resholder:
for dbres in self.dbcluster.resholder.resources:
resource_plenary = Plenary.get_plenary(dbres, logger=self.logger)
resource_plenary.stash()
self.plenaries.append(resource_plenary)
if isinstance(self.dbcluster, MetaCluster):
for c in self.dbcluster.members:
plenary_cluster = Plenary.get_plenary(c,
logger=self.logger)
plenary_cluster.stash()
self.plenaries.append(plenary_cluster)
add_cluster_data(c, self.logger)
else:
add_cluster_data(self.dbcluster, self.logger)
|
|
# -*- coding: utf-8 -*-
import os
try:
import ujson as json
except ImportError:
import json
import irc3
import shelve
__doc__ = '''
==========================================
:mod:`irc3.plugins.storage` Storage plugin
==========================================
Add a ``db`` attribute to the bot
..
>>> from irc3.testing import IrcBot
>>> from irc3.testing import ini2config
>>> import tempfile
>>> fd = tempfile.NamedTemporaryFile(prefix='irc3', suffix='.db')
>>> db_file = fd.name
>>> fd.close()
>>> fd = tempfile.NamedTemporaryFile(prefix='irc3', suffix='.json')
>>> json_file = fd.name
>>> fd.close()
>>> fd = tempfile.NamedTemporaryFile(prefix='irc3', suffix='.sqlite')
>>> sqlite_file = fd.name
>>> fd.close()
Usage::
>>> config = ini2config("""
... [bot]
... includes =
... irc3.plugins.storage
... storage = json://%s
... """ % json_file)
>>> bot = IrcBot(**config)
Then use it::
>>> bot.db['mykey'] = dict(key='value')
>>> 'mykey' in bot.db
True
>>> bot.db['mykey']
{'key': 'value'}
>>> bot.db.setdefault('mykey', key='default')
{'key': 'value'}
>>> bot.db.setdefault('mykey', item='default')
{'item': 'default'}
>>> bot.db.set('mykey', item='value')
>>> bot.db.setdefault('mykey', item='default')
{'item': 'value'}
>>> del bot.db['mykey']
>>> bot.db.get('mykey')
>>> bot.db.get('mykey', 'default')
'default'
>>> bot.db['mykey']
Traceback (most recent call last):
...
KeyError: 'mykey'
>>> 'mykey' in bot.db
False
>>> bot.db.setlist('mylist', ['foo', 'bar'])
>>> bot.db.getlist('mylist')
['foo', 'bar']
>>> del bot.db['mylist']
You can use an instance as key::
>>> class MyPlugin:
... pass
>>> plugin = MyPlugin()
>>> bot.db[plugin] = dict(key='value')
>>> bot.db[plugin]
{'key': 'value'}
>>> del bot.db[plugin]
>>> bot.db.get(plugin)
..
>>> bot.db.SIGINT()
You can also use shelve::
>>> config = ini2config("""
... [bot]
... includes =
... irc3.plugins.storage
... storage = shelve://%s
... """ % db_file)
>>> bot = IrcBot(**config)
>>> bot.db['mykey'] = dict(key='value')
>>> bot.db['mykey']
{'key': 'value'}
>>> del bot.db['mykey']
>>> bot.db.get('mykey')
>>> bot.db.setlist('mylist', ['foo', 'bar'])
>>> bot.db.getlist('mylist')
['foo', 'bar']
>>> del bot.db['mylist']
..
>>> bot.db.getlist('mylist', ['foo', 'bar'])
['foo', 'bar']
>>> bot.db.setlist('mylist', ['foo', 'bar'])
>>> bot.db.setlist('mylist', ['foo', 'bar'])
>>> del bot.db['mylist']
>>> bot.db.SIGINT()
Or redis::
>>> config = ini2config("""
... [bot]
... includes =
... irc3.plugins.storage
... storage = redis://localhost:6379/10
... """)
>>> bot = IrcBot(**config)
..
>>> bot.db.backend.flushdb() # requires redis
>>> bot.db.SIGINT()
Then use it::
>>> bot.db['mykey'] = dict(key='value')
>>> bot.db['mykey']
{'key': 'value'}
>>> del bot.db['mykey']
>>> bot.db.get('mykey')
>>> bot.db['mykey']
Traceback (most recent call last):
...
KeyError: 'mykey'
>>> bot.db.setlist('mylist', ['foo', 'bar'])
>>> bot.db.getlist('mylist')
['foo', 'bar']
>>> del bot.db['mylist']
Or sqlite::
>>> config = ini2config("""
... [bot]
... includes =
... irc3.plugins.storage
... storage = sqlite://%s
... """ % sqlite_file)
>>> bot = IrcBot(**config)
..
>>> bot.db.backend.flushdb() # requires sqlite
>>> bot.db.SIGINT()
Then use it::
>>> bot.db['mykey'] = dict(key='value')
>>> bot.db['mykey']
{'key': 'value'}
>>> del bot.db['mykey']
>>> bot.db.get('mykey')
>>> bot.db['mykey']
Traceback (most recent call last):
...
KeyError: 'mykey'
>>> bot.db.setlist('mylist', ['foo', 'bar'])
>>> bot.db.getlist('mylist')
['foo', 'bar']
>>> del bot.db['mylist']
Api
===
.. autoclass:: Storage
:members: __getitem__,__setitem__,__delitem__,__contains__,get,set,setdefault
'''
class Shelve:
def __init__(self, uri=None, **kwargs):
self.filename = uri[9:]
self.db = shelve.open(self.filename)
def set(self, key, value):
self.db[key] = value
self.db.sync()
def get(self, key):
return self.db[key]
def delete(self, key):
del self.db[key]
self.sync()
def contains(self, key):
return key in self.db
def sync(self):
self.db.sync()
def close(self):
self.db.close()
class JSON:
def __init__(self, uri=None, **kwargs):
self.filename = uri[7:]
if os.path.isfile(self.filename): # pragma: no cover
with open(self.filename) as fd:
self.db = json.load(fd)
else:
self.db = {}
def set(self, key, value):
self.db[key] = value
self.sync()
def get(self, key):
return self.db[key]
def delete(self, key):
del self.db[key]
self.sync()
def contains(self, key):
return key in self.db
def sync(self):
with open(self.filename, 'w') as fd:
json.dump(self.db, fd, indent=2, sort_keys=True)
def close(self):
self.sync()
class Redis:
def __init__(self, uri=None, **kwargs):
ConnectionPool = irc3.utils.maybedotted(
'redis.connection.ConnectionPool')
pool = ConnectionPool.from_url(uri)
StrictRedis = irc3.utils.maybedotted('redis.client.StrictRedis')
self.db = StrictRedis(connection_pool=pool)
def set(self, key, value):
self.db.hmset(key, value)
def get(self, key):
keys = self.db.hkeys(key)
if not keys:
raise KeyError()
values = self.db.hmget(key, keys)
keys = [k.decode('utf8') for k in keys]
values = [v.decode('utf8') for v in values]
values = dict(zip(keys, values))
return values
def delete(self, key):
self.db.delete(key)
def contains(self, key):
return self.db.exists(key)
def flushdb(self):
self.db.flushdb()
def sync(self):
self.db.save()
def close(self):
self.sync()
class SQLite:
CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS
irc3_storage (
key text not null,
value text default '',
PRIMARY KEY (key)
);
"""
UPSERT = """
INSERT OR REPLACE INTO irc3_storage(key,value) VALUES(?, ?);
"""
def __init__(self, uri=None, **kwargs):
self.sqlite = irc3.utils.maybedotted('sqlite3')
self.uri = uri.split('://')[-1]
conn = self.sqlite.connect(self.uri)
cursor = conn.cursor()
cursor.execute(self.CREATE_TABLE)
conn.commit()
conn.close()
def set(self, key, value):
conn = self.sqlite.connect(self.uri)
cursor = conn.cursor()
cursor.execute(self.UPSERT, (key, json.dumps(value)))
cursor.fetchall()
conn.commit()
conn.close()
def get(self, key):
value = None
conn = self.sqlite.connect(self.uri)
cursor = conn.cursor()
cursor.execute("SELECT value FROM irc3_storage where key=?;", (key,))
for row in cursor.fetchall():
value = json.loads(row[0])
break
cursor.close()
conn.close()
if value is None:
raise KeyError(key)
return value
def delete(self, key):
conn = self.sqlite.connect(self.uri)
cursor = conn.cursor()
cursor.execute("DELETE FROM irc3_storage where key=?;", (key,))
cursor.close()
conn.commit()
conn.close()
def contains(self, key):
conn = self.sqlite.connect(self.uri)
cursor = conn.cursor()
cursor.execute("SELECT value FROM irc3_storage where key=?;", (key,))
res = False
if len(list(cursor.fetchall())) == 1:
res = True
cursor.close()
conn.close()
return res
def flushdb(self):
conn = self.sqlite.connect(self.uri)
cursor = conn.cursor()
cursor.execute("DROP TABLE IF EXISTS irc3_storage;")
cursor.execute(self.CREATE_TABLE)
cursor.close()
conn.commit()
conn.close()
def sync(self):
pass
def close(self):
pass
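# --- Illustrative sketch (not part of irc3) ------------------------------------
# The SQLite backend above persists every value as JSON text and relies on
# "INSERT OR REPLACE" to upsert by primary key. A dependency-free round trip of
# that idea, using an in-memory database and a made-up helper name:
def _sqlite_upsert_roundtrip_sketch():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute(SQLite.CREATE_TABLE)
    # Two upserts on the same key: the second one wins.
    conn.execute(SQLite.UPSERT, ('mykey', json.dumps({'key': 'value'})))
    conn.execute(SQLite.UPSERT, ('mykey', json.dumps({'key': 'other'})))
    row = conn.execute(
        "SELECT value FROM irc3_storage WHERE key=?;", ('mykey',)).fetchone()
    conn.close()
    return json.loads(row[0])  # -> {'key': 'other'}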
@irc3.plugin
class Storage:
backends = {
'shelve': Shelve,
'json': JSON,
'unix': Redis,
'redis': Redis,
'rediss': Redis,
'sqlite': SQLite,
}
def __init__(self, context):
uri = context.config.storage
name = uri.split('://', 1)[0]
try:
factory = self.backends[name]
except KeyError: # pragma: no cover
raise LookupError('No such backend %s' % name)
self.backend = factory(uri)
self.context = context
self.context.db = self
async def connection_ready(self):
meth = getattr(self.backend, 'connection_ready', None)
if meth is not None:
await meth()
def setdefault(self, key_, **kwargs):
"""Update storage value for key with kwargs iif the keys doesn't
exist. Return stored values"""
stored = self[key_]
changed = False
for k, v in kwargs.items():
if k not in stored:
stored[k] = v
changed = True
else:
kwargs[k] = stored[k]
if changed:
self[key_] = stored
return kwargs
def get(self, key_, default=None):
"""Get storage value for key or return default"""
if key_ not in self:
return default
else:
return self[key_]
def getlist(self, key_, default=None):
"""Get storage value (as list) for key or return default"""
if key_ not in self:
return default
else:
value = self[key_]
value = [(int(i), v) for i, v in value.items()]
return [v for k, v in sorted(value)]
def set(self, key_, **kwargs):
"""Update storage value for key with kwargs"""
stored = self.get(key_, dict())
changed = False
for k, v in kwargs.items():
if k not in stored or stored[k] != v:
stored[k] = v
changed = True
if changed:
self[key_] = stored
def setlist(self, key_, value):
"""Update storage value (as list)"""
value = dict([(str(i), v) for i, v in enumerate(value)])
if key_ in self:
del self[key_]
self.set(key_, **value)
def __setitem__(self, key, value):
"""Set storage value for key"""
key = getattr(key, '__module__', key)
if not isinstance(value, dict): # pragma: no cover
raise TypeError('value must be a dict')
try:
return self.backend.set(key, value)
except Exception as e: # pragma: no cover
self.context.log.exception(e)
raise
def __getitem__(self, key):
"""Get storage value for key"""
key = getattr(key, '__module__', key)
try:
return self.backend.get(key)
except KeyError:
raise KeyError(key)
except Exception as e: # pragma: no cover
self.context.log.exception(e)
raise
def __delitem__(self, key):
"""Delete key in storage"""
key = getattr(key, '__module__', key)
try:
self.backend.delete(key)
except Exception as e: # pragma: no cover
self.context.log.exception(e)
raise
def __contains__(self, key):
"""Return True if storage contains key"""
key = getattr(key, '__module__', key)
try:
return self.backend.contains(key)
except Exception as e: # pragma: no cover
self.context.log.exception(e)
raise
def SIGINT(self):
self.backend.close()
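# --- Illustrative sketch (not part of irc3) ------------------------------------
# Every backend above exposes the same small contract consumed by Storage:
# set/get/delete/contains/sync/close with dict values (setlist() stores lists as
# index-keyed dicts such as {'0': 'foo', '1': 'bar'}). A minimal in-memory
# backend honouring that contract might look like the sketch below; the class
# name and the 'memory' scheme mentioned at the end are assumptions, not irc3
# API.
class _MemoryBackendSketch:

    def __init__(self, uri=None, **kwargs):
        self.db = {}

    def set(self, key, value):
        self.db[key] = dict(value)

    def get(self, key):
        # A missing key raises KeyError, like the shelve/json backends.
        return self.db[key]

    def delete(self, key):
        del self.db[key]

    def contains(self, key):
        return key in self.db

    def sync(self):
        pass

    def close(self):
        pass

# It could then be registered with ``Storage.backends['memory'] =
# _MemoryBackendSketch`` and selected via a ``storage = memory://`` setting
# (the scheme name is hypothetical).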
|
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.trainer.PyDataProvider2 as dp2
import collections
import swig_paddle
import numpy
__all__ = ['DataProviderConverter']
class IScanner(object):
def __init__(self, input_type, pos):
self.input_type = input_type
if not isinstance(self.input_type, dp2.InputType):
raise ValueError("input type should be dataprovider2.InputType")
self.pos = pos
# data_in_gpu is used to indicate whether to create argument on GPU
# or not in GPU mode. Now if using one thread (trainer_count=1),
# trainer uses NeuralNetwork which needs to create argument on GPU
# before calling forward function. So, set data_in_gpu to True.
# Otherwise, trainer uses MultiGradientMachine which will transfer
# data from CPU to GPU in the forward function, set data_in_gpu to
# False in this case.
self.data_in_gpu = (swig_paddle.isUsingGpu()
and swig_paddle.getTrainerCount() == 1)
def scan(self, dat):
pass
def finish_scan(self, argument):
pass
class DenseScanner(IScanner):
"""
:type __mat__: numpy.ndarray
"""
def __init__(self, input_type, pos):
IScanner.__init__(self, input_type, pos)
self.__mat__ = None
def scan(self, dat):
if self.__mat__ is None:
self.__mat__ = numpy.array([dat], dtype='float32')
else:
self.__mat__ = numpy.append(self.__mat__, [dat], axis=0)
def finish_scan(self, argument):
assert isinstance(argument, swig_paddle.Arguments)
if self.__mat__.dtype != numpy.float32:
self.__mat__ = self.__mat__.astype(numpy.float32)
m = swig_paddle.Matrix.createDenseFromNumpy(self.__mat__, True,
self.data_in_gpu)
argument.setSlotValue(self.pos, m)
class SparseBinaryScanner(IScanner):
def __init__(self, input_type, pos):
IScanner.__init__(self, input_type, pos)
self.__rows__ = [0]
self.__cols__ = []
self.__height__ = 0
self.__value__ = []
def scan(self, dat):
self.extend_cols(dat)
self.__rows__.append(len(self.__cols__))
self.__height__ += 1
def extend_cols(self, dat):
self.__cols__.extend(dat)
def finish_scan(self, argument):
assert isinstance(argument, swig_paddle.Arguments)
m = swig_paddle.Matrix.createSparse(
self.__height__,
self.input_type.dim,
len(self.__cols__),
len(self.__value__) == 0,
False, # trans
False) # TODO support GPU
assert isinstance(m, swig_paddle.Matrix)
m.sparseCopyFrom(self.__rows__, self.__cols__, self.__value__)
argument.setSlotValue(self.pos, m)
class SparseFloatScanner(SparseBinaryScanner):
def __init__(self, input_type, pos):
SparseBinaryScanner.__init__(self, input_type, pos)
def extend_cols(self, dat):
self.__cols__.extend((x[0] for x in dat))
self.__value__.extend((x[1] for x in dat))
class IndexScanner(IScanner):
def __init__(self, input_type, pos):
IScanner.__init__(self, input_type, pos)
self.__ids__ = []
def scan(self, dat):
self.__ids__.append(dat)
def finish_scan(self, argument):
ids = swig_paddle.IVector.create(self.__ids__, self.data_in_gpu)
assert isinstance(argument, swig_paddle.Arguments)
argument.setSlotIds(self.pos, ids)
class SequenceScanner(IScanner):
def __init__(self, input_type, pos, inner_scanner, setter):
IScanner.__init__(self, input_type, pos)
self.__seq__ = [0]
self.__inner_scanner__ = inner_scanner
self.__setter__ = setter
def scan(self, dat):
self.__seq__.append(self.__seq__[-1] + self.get_size(dat))
for each in dat:
self.__inner_scanner__.scan(each)
def finish_scan(self, argument):
seq = swig_paddle.IVector.create(self.__seq__, False)
self.__setter__(argument, self.pos, seq)
self.__inner_scanner__.finish_scan(argument)
def get_size(self, dat):
if isinstance(self.__inner_scanner__, SequenceScanner):
return sum(self.__inner_scanner__.get_size(item) for item in dat)
else:
return len(dat)
class DataProviderConverter(object):
def __init__(self, input_types):
self.input_types = input_types
assert isinstance(self.input_types, collections.Sequence)
for each in self.input_types:
assert isinstance(each, dp2.InputType)
def convert(self, dat, argument=None):
if argument is None:
argument = swig_paddle.Arguments.createArguments(0)
assert isinstance(argument, swig_paddle.Arguments)
argument.resize(len(self.input_types))
scanners = [
DataProviderConverter.create_scanner(i, each_type)
for i, each_type in enumerate(self.input_types)
]
for each_sample in dat:
for each_step, scanner in zip(each_sample, scanners):
scanner.scan(each_step)
for scanner in scanners:
scanner.finish_scan(argument)
return argument
def __call__(self, dat, argument=None):
return self.convert(dat, argument)
@staticmethod
def create_scanner(i, each):
assert isinstance(each, dp2.InputType)
retv = None
if each.type == dp2.DataType.Dense:
retv = DenseScanner(each, i)
elif each.type == dp2.DataType.Index:
retv = IndexScanner(each, i)
elif each.type == dp2.DataType.SparseNonValue:
retv = SparseBinaryScanner(each, i)
elif each.type == dp2.DataType.SparseValue:
retv = SparseFloatScanner(each, i)
assert retv is not None
if each.seq_type == dp2.SequenceType.SUB_SEQUENCE:
retv = SequenceScanner(
each, i, retv,
lambda a, p, seq: a.setSlotSubSequenceStartPositions(p, seq))
if each.seq_type in [
dp2.SequenceType.SUB_SEQUENCE, dp2.SequenceType.SEQUENCE
]:
retv = SequenceScanner(
each, i, retv,
lambda a, p, seq: a.setSlotSequenceStartPositions(p, seq))
return retv
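# --- Illustrative sketch (not part of PaddlePaddle) -----------------------------
# DataProviderConverter builds one scanner per input slot, feeds every sample
# column-wise through scan(), and lets finish_scan() emit the accumulated slot.
# The dependency-free toy below shows only that accumulation pattern; the names
# are made up and no swig_paddle objects are involved.
class _ColumnScannerSketch(object):
    def __init__(self):
        self.column = []

    def scan(self, value):
        self.column.append(value)

    def finish_scan(self):
        return self.column


def _convert_rows_sketch(rows, num_slots):
    """Turn row-oriented samples into one accumulated column per slot."""
    scanners = [_ColumnScannerSketch() for _ in range(num_slots)]
    for row in rows:
        for value, scanner in zip(row, scanners):
            scanner.scan(value)
    return [scanner.finish_scan() for scanner in scanners]

# For example, _convert_rows_sketch([[1, 'a'], [2, 'b']], 2) yields
# [[1, 2], ['a', 'b']], i.e. the same row-to-column transposition that convert()
# performs before each typed scanner turns its column into a swig_paddle slot.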
|
|
import shutil
from typing import Dict, Tuple
from dateparser import parse
import urllib3
from CommonServerPython import *
# Disable insecure warnings
urllib3.disable_warnings()
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class Client(BaseClient):
"""
Client to use in the Zimperium integration. Overrides BaseClient
"""
def __init__(self, base_url: str, api_key: str, verify: bool):
super().__init__(base_url=base_url, verify=verify)
self._headers = {'api_key': api_key, 'Accept': 'application/json'}
self._proxies = handle_proxy()
def users_search_request(self, query: str, size: str, page: str) -> dict:
"""Search users by sending a GET request.
Args:
query: users search query.
size: response size.
page: response page.
Returns:
Response from API.
"""
params = {
'rsql': query,
'size': size,
'page': page,
}
return self._http_request(method='GET', url_suffix='/users/public/search', headers=self._headers, params=params)
def user_get_by_id_request(self, object_id: str) -> dict:
"""Retrieve user details by sending a GET request.
Args:
object_id: object ID.
Returns:
Response from API.
"""
return self._http_request(method='GET', url_suffix=f'/users/public/{object_id}', headers=self._headers)
def devices_search_request(self, query: str, size: str, page: str) -> dict:
"""Search devices by sending a GET request.
Args:
query: devices search query.
size: response size.
page: response page.
Returns:
Response from API.
"""
params = {
'rsql': query,
'size': size,
'page': page,
}
return self._http_request(method='GET', url_suffix='/devices/public/search', headers=self._headers,
params=params)
def device_get_by_id_request(self, zdid: str, device_id: str) -> dict:
"""Retrieve device details by sending a GET request.
Args:
zdid: zimperium ID.
device_id: device ID.
Returns:
Response from API.
"""
if (zdid and device_id) or (not zdid and not device_id):
raise Exception("To get device by ID, use the zdid or the device_id argument.")
if zdid:
url_suffix = f'/devices/public/{zdid}'
else:
url_suffix = f'/devices/public/deviceId/{device_id}'
return self._http_request(method='GET', url_suffix=url_suffix, headers=self._headers)
def devices_get_last_updated_request(self, last_updated: str, exclude_deleted: bool, size: str, page: str)\
-> dict:
"""Search last updated devices by sending a GET request.
Args:
last_updated: Last updated devices time frame.
exclude_deleted: whether to exclude deleted devices.
size: response size.
page: response page.
Returns:
Response from API.
"""
params = {
'fromLastUpdate': last_updated,
'excludeDeleted': exclude_deleted,
'size': size,
'page': page,
}
return self._http_request(method='GET', url_suffix='/devices/public/device_updates', headers=self._headers,
params=params)
def app_classification_get_request(self, app_hash: str, app_name: str) -> dict:
"""Retrieve device details by sending a GET request.
Args:
app_hash: application hash.
app_name: application name.
Returns:
Response from API.
"""
if (app_hash and app_name) or (not app_hash and not app_name):
raise Exception("To get application classification, use the app_hash or the app_name argument.")
if app_hash:
url_suffix = f'/malware/public/classify/hash/{app_hash}'
else:
url_suffix = f'/malware/public/classify/name/{app_name}'
return self._http_request(method='GET', url_suffix=url_suffix, headers=self._headers)
def report_get_request(self, bundle_id: str, itunes_id: str, app_hash: str, platform: str) -> dict:
"""Retrieve device details by sending a GET request.
Args:
bundle_id: bundle ID.
itunes_id: itunes ID.
app_hash: application hash.
platform: app platform
Returns:
Response from API.
"""
if not bundle_id and not itunes_id and not app_hash:
raise Exception("To get a report, use the bundle_id or the itunes_id or the app_hash argument.")
if (bundle_id and itunes_id) or (bundle_id and app_hash) or (itunes_id and app_hash):
raise Exception("To get a report, use exactly one of the arguments: bundle_id, itunes_id, app_hash.")
params = {}
if bundle_id:
url_suffix = f'/malware/public/reports/bundle/{bundle_id}'
params['platform'] = platform
elif itunes_id:
url_suffix = f'/malware/public/reports/itunes/{itunes_id}'
else:
url_suffix = f'/malware/public/reports/hash/{app_hash}'
params['platform'] = platform
return self._http_request(method='GET', url_suffix=url_suffix, headers=self._headers,
params=params)
def app_upload_for_analysis_request(self, entry_id: str) -> dict:
"""Upload an application for analysis by sending a POST request.
Args:
entry_id: entry ID.
Returns:
Response from API.
"""
file_path = demisto.getFilePath(entry_id).get('path')
file_name = demisto.getFilePath(entry_id).get('name')
if not file_path or not file_name:
raise Exception('Failed to find the file to upload for analysis.')
try:
shutil.copy(file_path, file_name)
except Exception:
raise Exception('Failed to prepare application for upload.')
try:
with open(file_path, 'rb') as file:
self._headers.update({'Content-Type': 'multipart/form-data'})
result = self._http_request(method='POST', url_suffix='/malware/public/upload/app',
headers=self._headers, files={'file1': file.read()}, timeout=240)
except Exception as err:
raise Exception(str(err))
finally:
shutil.rmtree(file_name, ignore_errors=True)
return result
def events_search_request(self, query: str, size: str, page: str, verbose: bool):
"""Search events by sending a GET request.
Args:
query: devices search query.
size: response size.
page: response page.
verbose: whether to include full event details.
Returns:
Response from API.
"""
params = {
'rsql': query,
'size': size,
'page': page,
'sort': 'deviceTime,asc',
'includeFullEventDetail': verbose,
}
return self._http_request(method='GET', url_suffix='/events/public/search', headers=self._headers,
params=params)
def test_module(client: Client, *_) -> str:
"""
Performs basic get request to get incident samples
"""
client.users_search_request(query='objectId==*', size='10', page='0')
if demisto.params().get('isFetch'):
client.events_search_request(query='eventId==*', size='10', page='0', verbose=False)
return 'ok'
def users_search(client: Client, args: Dict) -> CommandResults:
"""Search users
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
email = str(args.get('email', ''))
query = str(args.get('query', ''))
size = str(args.get('size', '10'))
page = str(args.get('page', '0'))
if email and query:
raise Exception('Provide either the email or the query arguments.')
elif email:
search_query = f'email=={email}'
elif query:
search_query = query
else:
search_query = 'objectId==*'
users = client.users_search_request(search_query, size, page)
users_data = users.get('content')
total_elements = users.get('totalElements', '0')
table_name = ''
if not users.get('last'):
table_name = ' More users are available in the next page.'
headers = ['objectId', 'alias', 'firstName', 'middleName', 'lastName', 'email']
readable_output = tableToMarkdown(name=f"Number of users found: {total_elements}. {table_name}",
t=users_data, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Users',
outputs_key_field='objectId',
outputs=users_data,
readable_output=readable_output,
raw_response=users
)
return command_results
def user_get_by_id(client: Client, args: Dict) -> CommandResults:
"""Retrieve details for a single user.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
object_id = str(args.get('object_id', ''))
user = client.user_get_by_id_request(object_id)
headers = ['objectId', 'alias', 'firstName', 'middleName', 'lastName', 'email']
readable_output = tableToMarkdown(name="User:", t=user, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Users',
outputs_key_field='objectId',
outputs=user,
readable_output=readable_output,
raw_response=user
)
return command_results
def devices_search(client: Client, args: Dict) -> CommandResults:
"""Search devices
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
query = str(args.get('query', 'deviceId==*'))
size = str(args.get('size', '10'))
page = str(args.get('page', '0'))
devices = client.devices_search_request(query, size, page)
devices_data = devices.get('content')
total_elements = devices.get('totalElements', '0')
table_name = ''
if not devices.get('last'):
table_name = ' More Devices are available in the next page.'
headers = ['deviceId', 'zdid', 'deviceHash', 'model', 'osType', 'osVersion', 'updatedDate']
readable_output = tableToMarkdown(name=f"Number of devices found: {total_elements}. {table_name}",
t=devices_data, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Devices',
outputs_key_field='deviceId',
outputs=devices_data,
readable_output=readable_output,
raw_response=devices
)
return command_results
def device_get_by_id(client: Client, args: Dict) -> CommandResults:
"""Retrieve details for a single device.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
zdid = str(args.get('zdid', ''))
device_id = str(args.get('device_id', ''))
device = client.device_get_by_id_request(zdid, device_id)
headers = ['deviceId', 'zdid', 'model', 'osType', 'osVersion', 'updatedDate', 'deviceHash']
readable_output = tableToMarkdown(name=f"Device {device_id}:", t=device, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Devices',
outputs_key_field='deviceId',
outputs=device,
readable_output=readable_output,
raw_response=device
)
return command_results
def devices_get_last_updated(client: Client, args: Dict) -> CommandResults:
"""Retrieve last updated devices
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
timestamp_format = '%Y-%m-%d'
from_last_update = str(args.get('from_last_update', '1 day'))
last_updated = parse_date_range(from_last_update, date_format=timestamp_format)[0]
exclude_deleted = args.get('exclude_deleted') == 'true'
size = str(args.get('size', '10'))
page = str(args.get('page', '0'))
devices = client.devices_get_last_updated_request(last_updated, exclude_deleted, size, page)
devices_data = devices.get('content')
total_elements = devices.get('totalElements', '0')
table_name = ''
if not devices.get('last'):
table_name = ' More Devices are available in the next page.'
headers = ['deviceId', 'zdid', 'model', 'osType', 'osVersion', 'updatedDate', 'deviceHash']
readable_output = tableToMarkdown(name=f"Number of devices found: {total_elements}. {table_name}",
t=devices_data, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Devices',
outputs_key_field='deviceId',
outputs=devices_data,
readable_output=readable_output,
raw_response=devices
)
return command_results
def app_classification_get(client: Client, args: Dict) -> CommandResults:
"""Retrieve application classification.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
app_hash = str(args.get('app_hash', ''))
app_name = str(args.get('app_name', ''))
application = client.app_classification_get_request(app_hash, app_name)
if isinstance(application, dict): # an app name can have multiple results due to different versions.
application_data = application.get('content')
else: # or it can have only one result, if queried using a hash or if it has only one version.
application_data = application[0]
headers = ['objectId', 'hash', 'name', 'version', 'classification', 'score', 'privacyEnum', 'securityEnum']
readable_output = tableToMarkdown(name="Application:", t=application_data, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Application',
outputs_key_field='objectId',
outputs=application_data,
readable_output=readable_output,
raw_response=application
)
return command_results
def calculate_dbot_score(application_data: dict) -> int:
"""Determines app dbot score
Args:
application_data: app data
Returns:
a number representing the dbot score
"""
if not application_data: # no response from Zimperium
return 0
classification = application_data.get('classification')
if not classification:
return 0
if classification == 'Legitimate':
return 1
return 3 # classification == Malicious
def file_reputation(client: Client, args: Dict) -> List[CommandResults]:
"""Get the reputation of a hash representing an App
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
list of CommandResults.
"""
hash_list = argToList(args.get('file'))
command_results_list: List[CommandResults] = []
headers = ['objectId', 'hash', 'name', 'version', 'classification', 'score', 'privacyEnum', 'securityEnum']
for app_hash in hash_list:
try:
application = client.app_classification_get_request(app_hash, '')
application_data = application[0]
except Exception as err:
if 'Error in API call [404]' in str(err):
application_data = {'hash': app_hash}
else:
raise Exception(err)
score = calculate_dbot_score(application_data)
dbot_score = Common.DBotScore(
indicator=app_hash,
indicator_type=DBotScoreType.FILE,
integration_name='Zimperium',
score=score
)
hash_type = get_hash_type(app_hash)
if hash_type == 'md5':
file = Common.File(
md5=app_hash,
dbot_score=dbot_score
)
elif hash_type == 'sha1':
file = Common.File(
sha1=app_hash,
dbot_score=dbot_score
)
else:
file = Common.File(
sha256=app_hash,
dbot_score=dbot_score
)
if not score:
readable_output = tableToMarkdown(name=f"Hash {app_hash} reputation is unknown to Zimperium.",
t=application_data, headers=headers, removeNull=True)
else:
readable_output = tableToMarkdown(name=f"Hash {app_hash} reputation:", t=application_data, headers=headers,
removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Application',
outputs_key_field='objectId',
outputs=application_data,
readable_output=readable_output,
raw_response=application_data,
indicator=file
)
command_results_list.append(command_results)
return command_results_list
def report_get(client: Client, args: Dict) -> CommandResults:
"""Retrieve a report.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
bundle_id = str(args.get('bundle_id', ''))
itunes_id = str(args.get('itunes_id', ''))
app_hash = str(args.get('app_hash', ''))
platform = str(args.get('platform', 'ios'))
report = client.report_get_request(bundle_id, itunes_id, app_hash, platform).get('report', {})
report_data = report.get('report')
if not report_data:
command_results = CommandResults(
readable_output='A report was not found.',
raw_response=report
)
else:
# drop the analysis metadata so it does not overload the context
app_analysis = report_data.get('app_analysis')
if app_analysis and app_analysis.get('application_type') == 'Android':
analysis = app_analysis.get('analysis')
if analysis:
report_data['app_analysis']['analysis'] = list(analysis.keys())
app_md5 = report.get('md5') if 'md5' in report else report_data.get('app_analysis', {}).get('md5_hash')
if app_md5:
report_data.update({'md5': app_md5})
headers = ['behavior', 'md5', 'threats']
readable_output = tableToMarkdown(name="Report:", t=report_data, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Reports',
outputs_key_field='app_md5',
outputs=report_data,
readable_output=readable_output,
raw_response=report
)
return command_results
def events_search(client: Client, args: Dict) -> CommandResults:
"""Search events.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
query = str(args.get('query', 'eventId==*'))
size = str(args.get('size', '10'))
page = str(args.get('page', '0'))
verbose = str(args.get('verbose')) == 'true'
events = client.events_search_request(query, size, page, verbose)
events_data = events.get('content')
total_elements = events.get('totalElements')
if not verbose:
for event_data in events_data:
event_data.pop('eventDetail', None)
table_name = ''
if not events.get('last'):
table_name = ' More events are available in the next page.'
headers = ['eventId', 'eventName', 'eventState', 'incidentSummary', 'severity', 'persistedTime']
readable_output = tableToMarkdown(name=f"Number of events found: {total_elements}. {table_name}",
t=events_data, headers=headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='Zimperium.Events',
outputs_key_field='eventId',
outputs=events_data,
readable_output=readable_output,
raw_response=events
)
return command_results
def fetch_incidents(client: Client, last_run: dict, fetch_query: str, first_fetch_time: str, max_fetch: str = '50')\
-> Tuple[dict, list]:
"""
This function will execute each interval (default is 1 minute).
Args:
client (Client): Zimperium client
last_run (dateparser.time): The greatest incident created_time we fetched from last fetch
fetch_query: fetch query to append to the persistedtime
first_fetch_time (dateparser.time): If last_run is None then fetch all incidents since first_fetch_time
max_fetch: max events to fetch
Returns:
next_run: This will be last_run in the next fetch-incidents
incidents: Incidents that will be created in Demisto
"""
timestamp_format = '%Y-%m-%dT%H:%M:%S.%fZ'
if not last_run: # if first time fetching
next_run = {
'time': parse_date_range(first_fetch_time, date_format=timestamp_format)[0],
'last_event_ids': []
}
else:
next_run = last_run
query = f"persistedTime=gt={next_run.get('time')}"
if fetch_query:
query += f";{fetch_query}"
events = client.events_search_request(query=query, size=max_fetch, page='0', verbose=False)
events_data = events.get('content')
incidents = []
if events_data:
last_event_ids = last_run.get('last_event_ids', [])
new_event_ids = []
last_event_created_time = None
for event_data in events_data:
event_data.pop('eventDetail', None) # drop eventDetail so it does not overload the context
event_id = event_data.get('eventId')
if event_id not in last_event_ids: # check that event was not fetched in the last fetch
last_event_created_time = parse(event_data.get('persistedTime'))
incident = {
'name': event_data.get('incidentSummary'),
'occurred': last_event_created_time.strftime(timestamp_format),
'severity': event_severity_to_dbot_score(event_data.get('severity')),
'rawJSON': json.dumps(event_data)
}
incidents.extend([incident])
new_event_ids.extend([event_id])
if new_event_ids and last_event_created_time:
next_run = {
'time': last_event_created_time.strftime(timestamp_format),
'last_event_ids': json.dumps(new_event_ids) # save the event IDs from the last fetch
}
demisto.debug(f'Zimperium last fetch data: {str(next_run)}')
return next_run, incidents
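# --- Illustrative sketch (not part of this integration) -------------------------
# fetch_incidents() above follows a common incremental-fetch pattern: query
# everything newer than the stored cursor, skip IDs already seen in the previous
# window, then advance the cursor to the newest event processed. The helper
# below is a dependency-free rendering of that pattern; its name and the
# plain-dict event shape are assumptions made for illustration only.
def _incremental_fetch_sketch(events, last_run):
    """events: dicts with 'eventId' and a sortable 'persistedTime', already
    filtered to persistedTime >= last_run['time'].
    last_run: dict with 'time' and 'last_event_ids'.
    Returns (next_run, new_events)."""
    seen = set(last_run.get('last_event_ids', []))
    new_events, new_ids = [], []
    newest = last_run.get('time')
    for event in sorted(events, key=lambda e: e['persistedTime']):
        if event['eventId'] in seen:
            continue
        new_events.append(event)
        new_ids.append(event['eventId'])
        newest = event['persistedTime']
    if new_ids:
        return {'time': newest, 'last_event_ids': new_ids}, new_events
    return dict(last_run), new_events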
def event_severity_to_dbot_score(severity_str: str):
"""Converts an severity string to DBot score representation
alert severity. Can be one of:
Low -> 1
Medium -> 2
High -> 3
Args:
severity_str: String representation of severity.
Returns:
Dbot representation of severity
"""
severity = severity_str.lower()
if severity == 'low':
return 1
if severity == 'important':
return 2
if severity == 'critical':
return 3
demisto.info(f'Zimperium incident severity: {severity} is not known. Setting as unknown (DBotScore of 0).')
return 0
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
api_key = params.get('api_key')
base_url = urljoin(params.get('url'), '/api/v1/')
verify = not params.get('insecure', False)
# fetch params
fetch_query = params.get('fetch_query')
max_fetch = str(min(50, int(params.get('max_fetch') or '50')))
first_fetch_time = params.get('fetch_time', '3 days').strip()
command = demisto.command()
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(base_url=base_url, api_key=api_key, verify=verify)
commands = {
'zimperium-events-search': events_search,
'zimperium-users-search': users_search,
'zimperium-user-get-by-id': user_get_by_id,
'zimperium-devices-search': devices_search,
'zimperium-device-get-by-id': device_get_by_id,
'zimperium-devices-get-last-updated': devices_get_last_updated,
'zimperium-app-classification-get': app_classification_get,
'file': file_reputation,
'zimperium-report-get': report_get,
}
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client))
elif demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
fetch_query=fetch_query,
first_fetch_time=first_fetch_time,
max_fetch=max_fetch
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command in commands:
return_results(commands[command](client, demisto.args()))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as err:
if 'Resource not found' in str(err):
return_results('Object was not found in Zimperium, please make sure your arguments are correct.')
else:
return_error(str(err), err)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for source_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
import sys
import tempfile
import zipfile
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
def line_number_above():
"""Get lineno of the AST node immediately above this function's call site.
It is assumed that there is no empty line(s) between the call site and the
preceding AST node.
Returns:
The lineno of the preceding AST node, at the same level of the AST.
If the preceding AST spans multiple lines:
- In Python 3.8+, the lineno of the first line is returned.
- In older Python versions, the lineno of the last line is returned.
"""
# https://bugs.python.org/issue12458: In Python 3.8, traceback started
# to return the lineno of the first line of a multi-line continuation block,
# instead of that of the last line. Therefore, in Python 3.8+, we use `ast` to
# get the lineno of the first line.
call_site_lineno = tf_inspect.stack()[1][2]
if sys.version_info < (3, 8):
return call_site_lineno - 1
else:
with open(__file__, "rb") as f:
source_text = f.read().decode("utf-8")
source_tree = ast.parse(source_text)
prev_node = _find_preceding_ast_node(source_tree, call_site_lineno)
return prev_node.lineno
def _find_preceding_ast_node(node, lineno):
"""Find the ast node immediately before and not including lineno."""
for i, child_node in enumerate(node.body):
if child_node.lineno == lineno:
return node.body[i - 1]
if hasattr(child_node, "body"):
found_node = _find_preceding_ast_node(child_node, lineno)
if found_node:
return found_node
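# --- Illustrative sketch (not part of TensorFlow) -------------------------------
# A self-contained demonstration of the idea behind line_number_above(): parse
# source text with ``ast`` and report the lineno of the node immediately
# preceding a given call-site line. The sample source string is made up.
def _demo_preceding_lineno_sketch():
  sample_source = "x = 1\ny = (2 +\n     3)\nz = y\n"
  sample_tree = ast.parse(sample_source)
  # The "call site" is line 4 ("z = y"); the preceding node is the assignment
  # that starts on line 2 and spans lines 2-3, so its lineno (the first line
  # of the node) is 2.
  return _find_preceding_ast_node(sample_tree, 4).lineno  # -> 2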
class GuessIsTensorFlowLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self.curr_file_path = os.path.normpath(os.path.abspath(__file__))
def tearDown(self):
ops.reset_default_graph()
def testGuessedBaseDirIsProbablyCorrect(self):
# In the non-pip world, code resides in "tensorflow/"
# In the pip world, after virtual pip, code resides in "tensorflow_core/"
# So, we have to check both of them
self.assertIn(
os.path.basename(source_utils._TENSORFLOW_BASEDIR),
["tensorflow", "tensorflow_core"])
def testUnitTestFileReturnsFalse(self):
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(self.curr_file_path))
def testSourceUtilModuleReturnsTrue(self):
self.assertTrue(
source_utils.guess_is_tensorflow_py_library(source_utils.__file__))
@test_util.run_v1_only("Tensor.op is not available in TF 2.x")
def testFileInPythonKernelsPathReturnsTrue(self):
x = constant_op.constant(42.0, name="x")
self.assertTrue(
source_utils.guess_is_tensorflow_py_library(x.op.traceback[-1][0]))
def testDebuggerExampleFilePathReturnsFalse(self):
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(os.path.normpath(
"site-packages/tensorflow/python/debug/examples/debug_mnist.py")))
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(os.path.normpath(
"site-packages/tensorflow/python/debug/examples/v1/example_v1.py")))
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(os.path.normpath(
"site-packages/tensorflow/python/debug/examples/v2/example_v2.py")))
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(os.path.normpath(
"site-packages/tensorflow/python/debug/examples/v3/example_v3.py")))
def testReturnsFalseForNonPythonFile(self):
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(
os.path.join(os.path.dirname(self.curr_file_path), "foo.cc")))
def testReturnsFalseForStdin(self):
self.assertFalse(source_utils.guess_is_tensorflow_py_library("<stdin>"))
def testReturnsFalseForEmptyFileName(self):
self.assertFalse(source_utils.guess_is_tensorflow_py_library(""))
class SourceHelperTest(test_util.TensorFlowTestCase):
def createAndRunGraphHelper(self):
"""Create and run a TensorFlow Graph to generate debug dumps.
This is intentionally done in a separate method, to make it easier to test
the stack-top mode of source annotation.
"""
self.dump_root = self.get_temp_dir()
self.curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
# Run a simple TF graph to generate some debug dumps that can be used in
# source annotation.
with session.Session() as sess:
self.u_init = constant_op.constant(
np.array([[5.0, 3.0], [-1.0, 0.0]]), shape=[2, 2], name="u_init")
self.u_init_line_number = line_number_above()
self.u = variables.Variable(self.u_init, name="u")
self.u_line_number = line_number_above()
self.v_init = constant_op.constant(
np.array([[2.0], [-1.0]]), shape=[2, 1], name="v_init")
self.v_init_line_number = line_number_above()
self.v = variables.Variable(self.v_init, name="v")
self.v_line_number = line_number_above()
self.w = math_ops.matmul(self.u, self.v, name="w")
self.w_line_number = line_number_above()
self.evaluate(self.u.initializer)
self.evaluate(self.v.initializer)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
run_metadata = config_pb2.RunMetadata()
sess.run(self.w, options=run_options, run_metadata=run_metadata)
self.dump = debug_data.DebugDumpDir(
self.dump_root, partition_graphs=run_metadata.partition_graphs)
self.dump.set_python_graph(sess.graph)
def setUp(self):
self.createAndRunGraphHelper()
self.helper_line_number = line_number_above()
def tearDown(self):
if os.path.isdir(self.dump_root):
file_io.delete_recursively(self.dump_root)
ops.reset_default_graph()
def testAnnotateWholeValidSourceFileGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(self.dump,
self.curr_file_path)
self.assertIn(self.u_init.op.name,
source_annotation[self.u_init_line_number])
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.v_init_line_number])
self.assertIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertIn(self.w.op.name, source_annotation[self.w_line_number])
# In the non-stack-top (default) mode, the helper line should be annotated
# with all the ops as well.
self.assertIn(self.u_init.op.name,
source_annotation[self.helper_line_number])
self.assertIn(self.u.op.name, source_annotation[self.helper_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.helper_line_number])
self.assertIn(self.v.op.name, source_annotation[self.helper_line_number])
self.assertIn(self.w.op.name, source_annotation[self.helper_line_number])
def testAnnotateWithStackTopGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump, self.curr_file_path, file_stack_top=True)
self.assertIn(self.u_init.op.name,
source_annotation[self.u_init_line_number])
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.v_init_line_number])
self.assertIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertIn(self.w.op.name, source_annotation[self.w_line_number])
# In the stack-top mode, the helper line should not have been annotated.
self.assertNotIn(self.helper_line_number, source_annotation)
def testAnnotateSubsetOfLinesGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump,
self.curr_file_path,
min_line=self.u_line_number,
max_line=self.u_line_number + 1)
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertNotIn(self.v_line_number, source_annotation)
def testAnnotateDumpedTensorsGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump, self.curr_file_path, do_dumped_tensors=True)
# Note: Constant Tensors u_init and v_init may not get dumped due to
# constant-folding.
self.assertIn(self.u.name, source_annotation[self.u_line_number])
self.assertIn(self.v.name, source_annotation[self.v_line_number])
self.assertIn(self.w.name, source_annotation[self.w_line_number])
self.assertNotIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertNotIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertNotIn(self.w.op.name, source_annotation[self.w_line_number])
self.assertIn(self.u.name, source_annotation[self.helper_line_number])
self.assertIn(self.v.name, source_annotation[self.helper_line_number])
self.assertIn(self.w.name, source_annotation[self.helper_line_number])
def testCallingAnnotateSourceWithoutPythonGraphRaisesException(self):
self.dump.set_python_graph(None)
with self.assertRaises(ValueError):
source_utils.annotate_source(self.dump, self.curr_file_path)
def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self):
# Create an unrelated source file.
unrelated_source_path = tempfile.mktemp()
with open(unrelated_source_path, "wt") as source_file:
source_file.write("print('hello, world')\n")
self.assertEqual({},
source_utils.annotate_source(self.dump,
unrelated_source_path))
# Clean up unrelated source file.
os.remove(unrelated_source_path)
def testLoadingPythonSourceFileWithNonAsciiChars(self):
source_path = tempfile.mktemp()
with open(source_path, "wb") as source_file:
source_file.write(u"print('\U0001f642')\n".encode("utf-8"))
source_lines, _ = source_utils.load_source(source_path)
self.assertEqual(source_lines, [u"print('\U0001f642')", u""])
# Clean up the temporary source file.
os.remove(source_path)
def testLoadNonexistentNonParPathFailsWithIOError(self):
bad_path = os.path.join(self.get_temp_dir(), "nonexistent.py")
with self.assertRaisesRegex(IOError,
"neither exists nor can be loaded.*par.*"):
source_utils.load_source(bad_path)
def testLoadingPythonSourceFileInParFileSucceeds(self):
# Create the .par file first.
temp_file_path = os.path.join(self.get_temp_dir(), "model.py")
with open(temp_file_path, "wb") as f:
f.write(b"import tensorflow as tf\nx = tf.constant(42.0)\n")
par_path = os.path.join(self.get_temp_dir(), "train_model.par")
with zipfile.ZipFile(par_path, "w") as zf:
zf.write(temp_file_path, os.path.join("tensorflow_models", "model.py"))
source_path = os.path.join(par_path, "tensorflow_models", "model.py")
source_lines, _ = source_utils.load_source(source_path)
self.assertEqual(
source_lines, ["import tensorflow as tf", "x = tf.constant(42.0)", ""])
def testLoadingPythonSourceFileInParFileFailsRaisingIOError(self):
# Create the .par file first.
temp_file_path = os.path.join(self.get_temp_dir(), "model.py")
with open(temp_file_path, "wb") as f:
f.write(b"import tensorflow as tf\nx = tf.constant(42.0)\n")
par_path = os.path.join(self.get_temp_dir(), "train_model.par")
with zipfile.ZipFile(par_path, "w") as zf:
zf.write(temp_file_path, os.path.join("tensorflow_models", "model.py"))
source_path = os.path.join(par_path, "tensorflow_models", "nonexistent.py")
with self.assertRaisesRegex(IOError,
"neither exists nor can be loaded.*par.*"):
source_utils.load_source(source_path)
@test_util.run_v1_only("Sessions are not available in TF 2.x")
class ListSourceAgainstDumpTest(test_util.TensorFlowTestCase):
def createAndRunGraphWithWhileLoop(self):
"""Create and run a TensorFlow Graph with a while loop to generate dumps."""
self.dump_root = self.get_temp_dir()
self.curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
# Run a simple TF graph to generate some debug dumps that can be used in
# source annotation.
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
self.traceback_first_line = line_number_above()
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
run_metadata = config_pb2.RunMetadata()
sess.run(loop, options=run_options, run_metadata=run_metadata)
self.dump = debug_data.DebugDumpDir(
self.dump_root, partition_graphs=run_metadata.partition_graphs)
self.dump.set_python_graph(sess.graph)
def setUp(self):
self.createAndRunGraphWithWhileLoop()
def tearDown(self):
if os.path.isdir(self.dump_root):
file_io.delete_recursively(self.dump_root)
ops.reset_default_graph()
def testGenerateSourceList(self):
source_list = source_utils.list_source_files_against_dump(self.dump)
# Assert that the file paths are sorted and unique.
file_paths = [item[0] for item in source_list]
self.assertEqual(sorted(file_paths), file_paths)
self.assertEqual(len(set(file_paths)), len(file_paths))
# Assert that each item of source_list has length 6.
for item in source_list:
self.assertTrue(isinstance(item, tuple))
self.assertEqual(6, len(item))
# The while loop body should have executed 3 times. The following table
# lists the tensors and how many times each of them is dumped.
# Tensor name # of times dumped:
# i:0 1
# while/Enter:0 1
# while/Merge:0 4
# while/Merge:1 4
# while/Less/y:0 4
# while/Less:0 4
# while/LoopCond:0 4
# while/Switch:0 1
# while/Switch:1 3
# while/Identity:0 3
# while/Add/y:0 3
# while/Add:0 3
# while/NextIteration:0 3
# while/Exit:0 1
# ----------------------------
# (Total) 39
#
# The total number of nodes is 12.
# The total number of tensors is 14 (2 of the nodes have 2 outputs:
# while/Merge, while/Switch).
_, is_tf_py_library, num_nodes, num_tensors, num_dumps, first_line = (
source_list[file_paths.index(self.curr_file_path)])
self.assertFalse(is_tf_py_library)
self.assertEqual(12, num_nodes)
self.assertEqual(14, num_tensors)
self.assertEqual(39, num_dumps)
self.assertEqual(self.traceback_first_line, first_line)
def testGenerateSourceListWithNodeNameFilter(self):
source_list = source_utils.list_source_files_against_dump(
self.dump, node_name_regex_allowlist=r"while/Add.*")
# Assert that the file paths are sorted and unique.
file_paths = [item[0] for item in source_list]
self.assertEqual(sorted(file_paths), file_paths)
self.assertEqual(len(set(file_paths)), len(file_paths))
# Assert that each item of source_list has length 6.
for item in source_list:
self.assertTrue(isinstance(item, tuple))
self.assertEqual(6, len(item))
# Due to the node-name filtering the result should only contain 2 nodes
# and 2 tensors. The total number of dumped tensors should be 6:
# while/Add/y:0 3
# while/Add:0 3
_, is_tf_py_library, num_nodes, num_tensors, num_dumps, _ = (
source_list[file_paths.index(self.curr_file_path)])
self.assertFalse(is_tf_py_library)
self.assertEqual(2, num_nodes)
self.assertEqual(2, num_tensors)
self.assertEqual(6, num_dumps)
def testGenerateSourceListWithPathRegexFilter(self):
curr_file_basename = os.path.basename(self.curr_file_path)
source_list = source_utils.list_source_files_against_dump(
self.dump,
path_regex_allowlist=(".*" + curr_file_basename.replace(".", "\\.") +
"$"))
self.assertEqual(1, len(source_list))
(file_path, is_tf_py_library, num_nodes, num_tensors, num_dumps,
first_line) = source_list[0]
self.assertEqual(self.curr_file_path, file_path)
self.assertFalse(is_tf_py_library)
self.assertEqual(12, num_nodes)
self.assertEqual(14, num_tensors)
self.assertEqual(39, num_dumps)
self.assertEqual(self.traceback_first_line, first_line)
if __name__ == "__main__":
googletest.main()
|
|
import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.ipv6 import clean_ipv6_address
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be prepended to most
# "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
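#
# Illustrative sketch (hypothetical model, not part of this module): for
# ``author = models.ForeignKey(Author, db_column="writer_id")`` on a model
# ``Book``, the attributes above resolve roughly as:
#
#     field = Book._meta.get_field("author")
#     field.name        # "author"
#     field.attname     # "author_id"   (ForeignKey appends "_id")
#     field.db_column   # "writer_id"
#     field.column      # "writer_id"   (db_column wins over attname)
#
# which is why introspection code should read getattr(obj, field.attname).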
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings are fundamentally allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _(u'Value %r is not a valid choice.'),
'null': _(u'This field cannot be null.'),
'blank': _(u'This field cannot be blank.'),
'unique': _(u'%(model_name)s with this %(field_label)s '
u'already exists.'),
}
# Generic field type description, usually overridden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (self.empty_strings_allowed and
connection.features.interprets_empty_strings_as_nulls):
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
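# Illustrative sketch (hypothetical validator): a validator that raises a
# ValidationError carrying a code lets the field substitute its own message.
# Assuming field.error_messages['invalid'] is u"'%(value)s' is not acceptable."
# and the validator raises
#     ValidationError(u"bad", code='invalid', params={'value': 'x'})
# then field.run_validators('x') re-raises ValidationError with the message
# u"'x' is not acceptable." instead of the validator's own text.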
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
msg = self.error_messages['invalid_choice'] % value
raise exceptions.ValidationError(msg)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
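# Illustrative sketch (assumes a configured project and a hypothetical
# ``age = models.IntegerField(null=False)``): clean() applies the three steps
# above in order -- to_python(), validate(), run_validators():
#
#     age_field.clean("42", instance)   # returns 42, coerced by to_python()
#     age_field.clean(None, instance)   # raises ValidationError: 'This field cannot be null.'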
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
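# Illustrative sketch (hypothetical custom field, not part of this module):
# a column type that has no built-in data_types entry can be produced by
# overriding db_type() directly,
#
#     class HandField(Field):
#         def db_type(self, connection):
#             return 'char(104)'
#
# whereas a field that maps onto an existing built-in type only needs
# get_internal_type() to return, e.g., 'TextField'.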
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save`` and
``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
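# Illustrative sketch of the dispatch above (values are examples only):
#
#     field.get_prep_lookup('isnull', True)     # -> True (passed through)
#     field.get_prep_lookup('exact', '5')       # -> self.get_prep_value('5')
#     field.get_prep_lookup('in', ['1', '2'])   # -> [get_prep_value('1'), get_prep_value('2')]
#     field.get_prep_lookup('year', '2012')     # -> 2012 (coerced to int)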
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day',
'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_unicode(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
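# Illustrative sketch: with grouped choices such as
#     [('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]), ('unknown', 'Unknown')]
# flatchoices collapses the optgroup and yields
#     [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')]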
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def validate(self, value, model_instance):
pass
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into the widget, for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid date format. It must be "
u"in YYYY-MM-DD format."),
'invalid_date': _(u"'%s' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
value = smart_str(value)
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField, self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
u"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
value = smart_str(value)
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_datetime'] % value
raise exceptions.ValidationError(msg)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField; it registers
# get_next_by_FOO and get_previous_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
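# Illustrative sketch (assumes a configured project with settings.USE_TZ = True):
#
#     naive = datetime.datetime(2012, 3, 1, 10, 0)
#     DateTimeField().get_prep_value(naive)
#
# emits the RuntimeWarning above and returns the same wall-clock moment made
# aware in the default time zone via timezone.make_aware().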
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("E-mail address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.GenericIPAddressField}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _(u"'%s' value has the correct format "
u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
value = smart_str(value)
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_time'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, verify_exists=False,
**kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(
validators.URLValidator(verify_exists=verify_exists))
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# HTTPSClientAuthConnection code comes courtesy of ActiveState website:
# http://code.activestate.com/recipes/
# 577548-https-httplib-client-connection-with-certificate-v/
import collections
import copy
import errno
import functools
import os
import re
try:
from eventlet.green import socket
from eventlet.green import ssl
except ImportError:
import socket
import ssl
import osprofiler.web
try:
import sendfile # noqa
SENDFILE_SUPPORTED = True
except ImportError:
SENDFILE_SUPPORTED = False
from oslo_log import log as logging
from oslo_utils import encodeutils
import six
from six.moves import http_client
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse
from glance.common import auth
from glance.common import exception
from glance.common import utils
from glance import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
# common chunk size for get and put
CHUNKSIZE = 65536
VERSION_REGEX = re.compile(r"/?v[0-9\.]+")
def handle_unauthenticated(func):
"""
Wrap a function to re-authenticate and retry.
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except exception.NotAuthenticated:
self._authenticate(force_reauth=True)
return func(self, *args, **kwargs)
return wrapped
def handle_redirects(func):
"""
Wrap the _do_request function to handle HTTP redirects.
"""
MAX_REDIRECTS = 5
@functools.wraps(func)
def wrapped(self, method, url, body, headers):
for _ in range(MAX_REDIRECTS):
try:
return func(self, method, url, body, headers)
except exception.RedirectException as redirect:
if redirect.url is None:
raise exception.InvalidRedirect()
url = redirect.url
raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
return wrapped
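# Illustrative sketch: handle_redirects retries the wrapped call each time it
# raises a RedirectException that carries a new URL, for example
#
#     @handle_redirects
#     def _do_request(self, method, url, body, headers):
#         ...
#
# If the server keeps redirecting, the loop stops after MAX_REDIRECTS (5)
# attempts and MaxRedirectsExceeded(redirects=5) is raised instead.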
class HTTPSClientAuthConnection(http_client.HTTPSConnection):
"""
Class to make an HTTPS connection, with support for
full client-based SSL authentication.
:see http://code.activestate.com/recipes/
577548-https-httplib-client-connection-with-certificate-v/
"""
def __init__(self, host, port, key_file, cert_file,
ca_file, timeout=None, insecure=False):
http_client.HTTPSConnection.__init__(self, host, port,
key_file=key_file,
cert_file=cert_file)
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
self.timeout = timeout
self.insecure = insecure
def connect(self):
"""
Connect to a host on a given (SSL) port.
If ca_file is pointing somewhere, use it to check Server Certificate.
Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
This is needed to pass cert_reqs=ssl.CERT_REQUIRED as a parameter to
ssl.wrap_socket(), which forces SSL to validate the server certificate
against our ca_file.
"""
sock = socket.create_connection((self.host, self.port), self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
# Check CA file unless 'insecure' is specified
if self.insecure is True:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=ssl.CERT_NONE)
else:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ca_certs=self.ca_file,
cert_reqs=ssl.CERT_REQUIRED)
class BaseClient(object):
"""A base client class"""
DEFAULT_PORT = 80
DEFAULT_DOC_ROOT = None
# Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
# Suse, FreeBSD/OpenBSD
DEFAULT_CA_FILE_PATH = ('/etc/ssl/certs/ca-certificates.crt:'
'/etc/pki/tls/certs/ca-bundle.crt:'
'/etc/ssl/ca-bundle.pem:'
'/etc/ssl/cert.pem')
OK_RESPONSE_CODES = (
http_client.OK,
http_client.CREATED,
http_client.ACCEPTED,
http_client.NO_CONTENT,
)
REDIRECT_RESPONSE_CODES = (
http_client.MOVED_PERMANENTLY,
http_client.FOUND,
http_client.SEE_OTHER,
http_client.USE_PROXY,
http_client.TEMPORARY_REDIRECT,
)
def __init__(self, host, port=None, timeout=None, use_ssl=False,
auth_token=None, creds=None, doc_root=None, key_file=None,
cert_file=None, ca_file=None, insecure=False,
configure_via_auth=True):
"""
Creates a new client to some service.
:param host: The host where service resides
:param port: The port where service resides
:param timeout: Connection timeout.
:param use_ssl: Should we use HTTPS?
:param auth_token: The auth token to pass to the server
:param creds: The credentials to pass to the auth plugin
:param doc_root: Prefix for all URLs we request from host
:param key_file: Optional PEM-formatted file that contains the private
key.
If use_ssl is True, and this param is None (the
default), then an environ variable
GLANCE_CLIENT_KEY_FILE is looked for. If no such
environ variable is found, ClientConnectionError
will be raised.
:param cert_file: Optional PEM-formatted certificate chain file.
If use_ssl is True, and this param is None (the
default), then an environ variable
GLANCE_CLIENT_CERT_FILE is looked for. If no such
environ variable is found, ClientConnectionError
will be raised.
:param ca_file: Optional CA cert file to use in SSL connections
If use_ssl is True, and this param is None (the
default), then an environ variable
GLANCE_CLIENT_CA_FILE is looked for.
:param insecure: Optional. If set then the server's certificate
will not be verified.
:param configure_via_auth: Optional. Defaults to True. If set, the
URL returned from the service catalog for the image
                       endpoint will **override** the URL supplied in
                       the host parameter.
"""
self.host = host
self.port = port or self.DEFAULT_PORT
self.timeout = timeout
# A timeout value of 0 means the connection never times out
if timeout == 0:
self.timeout = None
self.use_ssl = use_ssl
self.auth_token = auth_token
self.creds = creds or {}
self.connection = None
self.configure_via_auth = configure_via_auth
# doc_root can be an empty string, which is valid; that is why we
# cannot simply do doc_root or self.DEFAULT_DOC_ROOT below.
self.doc_root = (doc_root if doc_root is not None
else self.DEFAULT_DOC_ROOT)
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
self.insecure = insecure
self.auth_plugin = self.make_auth_plugin(self.creds, self.insecure)
self.connect_kwargs = self.get_connect_kwargs()
def get_connect_kwargs(self):
# Both secure and insecure connections have a timeout option
connect_kwargs = {'timeout': self.timeout}
if self.use_ssl:
if self.key_file is None:
self.key_file = os.environ.get('GLANCE_CLIENT_KEY_FILE')
if self.cert_file is None:
self.cert_file = os.environ.get('GLANCE_CLIENT_CERT_FILE')
if self.ca_file is None:
self.ca_file = os.environ.get('GLANCE_CLIENT_CA_FILE')
# Check that key_file/cert_file are either both set or both unset
if self.cert_file is not None and self.key_file is None:
msg = _("You have selected to use SSL in connecting, "
"and you have supplied a cert, "
"however you have failed to supply either a "
"key_file parameter or set the "
"GLANCE_CLIENT_KEY_FILE environ variable")
raise exception.ClientConnectionError(msg)
if self.key_file is not None and self.cert_file is None:
msg = _("You have selected to use SSL in connecting, "
"and you have supplied a key, "
"however you have failed to supply either a "
"cert_file parameter or set the "
"GLANCE_CLIENT_CERT_FILE environ variable")
raise exception.ClientConnectionError(msg)
if (self.key_file is not None and
not os.path.exists(self.key_file)):
msg = _("The key file you specified %s does not "
"exist") % self.key_file
raise exception.ClientConnectionError(msg)
connect_kwargs['key_file'] = self.key_file
if (self.cert_file is not None and
not os.path.exists(self.cert_file)):
msg = _("The cert file you specified %s does not "
"exist") % self.cert_file
raise exception.ClientConnectionError(msg)
connect_kwargs['cert_file'] = self.cert_file
if (self.ca_file is not None and
not os.path.exists(self.ca_file)):
msg = _("The CA file you specified %s does not "
"exist") % self.ca_file
raise exception.ClientConnectionError(msg)
if self.ca_file is None:
for ca in self.DEFAULT_CA_FILE_PATH.split(":"):
if os.path.exists(ca):
self.ca_file = ca
break
connect_kwargs['ca_file'] = self.ca_file
connect_kwargs['insecure'] = self.insecure
return connect_kwargs
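# Illustrative sketch (hypothetical paths): with use_ssl=True and no explicit
# key/cert/CA arguments, the kwargs above are resolved from the environment,
#
#     GLANCE_CLIENT_KEY_FILE=/etc/glance/client.key
#     GLANCE_CLIENT_CERT_FILE=/etc/glance/client.crt
#     GLANCE_CLIENT_CA_FILE=/etc/glance/ca.crt   # else DEFAULT_CA_FILE_PATH is searched
#
# and are later passed to HTTPSClientAuthConnection by _do_request().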
def configure_from_url(self, url):
"""
Sets up the connection based on the given url.
The form is:
<http|https>://<host>:port/doc_root
"""
LOG.debug("Configuring from URL: %s", url)
parsed = urlparse.urlparse(url)
self.use_ssl = parsed.scheme == 'https'
self.host = parsed.hostname
self.port = parsed.port or 80
self.doc_root = parsed.path.rstrip('/')
# We need to ensure a version identifier is appended to the doc_root
if not VERSION_REGEX.match(self.doc_root):
if self.DEFAULT_DOC_ROOT:
doc_root = self.DEFAULT_DOC_ROOT.lstrip('/')
self.doc_root += '/' + doc_root
msg = ("Appending doc_root %(doc_root)s to URL %(url)s" %
{'doc_root': doc_root, 'url': url})
LOG.debug(msg)
# ensure connection kwargs are re-evaluated after the service catalog
# publicURL is parsed for potential SSL usage
self.connect_kwargs = self.get_connect_kwargs()
def make_auth_plugin(self, creds, insecure):
"""
Returns an instantiated authentication plugin.
"""
strategy = creds.get('strategy', 'noauth')
plugin = auth.get_plugin_from_strategy(strategy, creds, insecure,
self.configure_via_auth)
return plugin
def get_connection_type(self):
"""
Returns the proper connection type
"""
if self.use_ssl:
return HTTPSClientAuthConnection
else:
return http_client.HTTPConnection
def _authenticate(self, force_reauth=False):
"""
Use the authentication plugin to authenticate and set the auth token.
:param force_reauth: Force re-authentication, bypassing any cached token.
"""
auth_plugin = self.auth_plugin
if not auth_plugin.is_authenticated or force_reauth:
auth_plugin.authenticate()
self.auth_token = auth_plugin.auth_token
management_url = auth_plugin.management_url
if management_url and self.configure_via_auth:
self.configure_from_url(management_url)
@handle_unauthenticated
def do_request(self, method, action, body=None, headers=None,
params=None):
"""
Make a request, returning an HTTP response object.
:param method: HTTP verb (GET, POST, PUT, etc.)
:param action: Requested path to append to self.doc_root
:param body: Data to send in the body of the request
:param headers: Headers to send with the request
:param params: Key/value pairs to use in query string
:returns: HTTP response object
"""
if not self.auth_token:
self._authenticate()
url = self._construct_url(action, params)
# NOTE(ameade): We need to copy these kwargs since they can be altered
# in _do_request but we need the originals if handle_unauthenticated
# calls this function again.
return self._do_request(method=method, url=url,
body=copy.deepcopy(body),
headers=copy.deepcopy(headers))
def _construct_url(self, action, params=None):
"""
Create a URL object we can use to pass to _do_request().
"""
action = urlparse.quote(action)
path = '/'.join([self.doc_root or '', action.lstrip('/')])
scheme = "https" if self.use_ssl else "http"
netloc = "%s:%d" % (self.host, self.port)
if isinstance(params, dict):
for (key, value) in params.items():
if value is None:
del params[key]
continue
if not isinstance(value, six.string_types):
value = str(value)
params[key] = encodeutils.safe_encode(value)
query = urlparse.urlencode(params)
else:
query = None
url = urlparse.ParseResult(scheme, netloc, path, '', query, '')
log_msg = _("Constructed URL: %s")
LOG.debug(log_msg, url.geturl())
return url
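# Illustrative sketch (hypothetical values): with host='glance.example.com',
# port=9292, use_ssl=False, doc_root='/v1' and params={'limit': 10},
#
#     self._construct_url('/images', params).geturl()
#
# yields 'http://glance.example.com:9292/v1/images?limit=10'.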
def _encode_headers(self, headers):
"""
Encodes headers.
Note: This should be used right before
sending anything out.
:param headers: Headers to encode
:returns: Dictionary with encoded headers'
names and values
"""
to_str = encodeutils.safe_encode
return {to_str(h): to_str(v) for h, v in six.iteritems(headers)}
@handle_redirects
def _do_request(self, method, url, body, headers):
"""
Connects to the server and issues a request. Handles converting
any returned HTTP error status codes to OpenStack/Glance exceptions
and closing the server connection. Returns the result data, or
raises an appropriate exception.
:param method: HTTP method ("GET", "POST", "PUT", etc...)
:param url: urlparse.ParsedResult object with URL information
:param body: data to send (as string, filelike or iterable),
or None (default)
:param headers: mapping of key/value pairs to add as headers
:note:
If the body param has a read attribute, and method is either
POST or PUT, this method will automatically conduct a chunked-transfer
encoding and use the body as a file object or iterable, transferring
chunks of data using the connection's send() method. This allows large
objects to be transferred efficiently without buffering the entire
body in memory.
"""
if url.query:
path = url.path + "?" + url.query
else:
path = url.path
try:
connection_type = self.get_connection_type()
headers = self._encode_headers(headers or {})
headers.update(osprofiler.web.get_trace_id_headers())
if 'x-auth-token' not in headers and self.auth_token:
headers['x-auth-token'] = self.auth_token
c = connection_type(url.hostname, url.port, **self.connect_kwargs)
def _pushing(method):
return method.lower() in ('post', 'put')
def _simple(body):
return body is None or isinstance(body, six.string_types)
def _filelike(body):
return hasattr(body, 'read')
def _sendbody(connection, iter):
connection.endheaders()
for sent in iter:
# iterator has done the heavy lifting
pass
def _chunkbody(connection, iter):
connection.putheader('Transfer-Encoding', 'chunked')
connection.endheaders()
for chunk in iter:
connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
connection.send('0\r\n\r\n')
# Do a simple request or a chunked request, depending
# on whether the body param is file-like or iterable and
# the method is PUT or POST
#
if not _pushing(method) or _simple(body):
# Simple request...
c.request(method, path, body, headers)
elif _filelike(body) or self._iterable(body):
c.putrequest(method, path)
use_sendfile = self._sendable(body)
# According to HTTP/1.1, Content-Length and Transfer-Encoding
# conflict.
for header, value in headers.items():
if use_sendfile or header.lower() != 'content-length':
c.putheader(header, str(value))
iter = utils.chunkreadable(body)
if use_sendfile:
# send actual file without copying into userspace
_sendbody(c, iter)
else:
# otherwise iterate and chunk
_chunkbody(c, iter)
else:
raise TypeError('Unsupported image type: %s' % body.__class__)
res = c.getresponse()
def _retry(res):
return res.getheader('Retry-After')
status_code = self.get_status_code(res)
if status_code in self.OK_RESPONSE_CODES:
return res
elif status_code in self.REDIRECT_RESPONSE_CODES:
raise exception.RedirectException(res.getheader('Location'))
elif status_code == http_client.UNAUTHORIZED:
raise exception.NotAuthenticated(res.read())
elif status_code == http_client.FORBIDDEN:
raise exception.Forbidden(res.read())
elif status_code == http_client.NOT_FOUND:
raise exception.NotFound(res.read())
elif status_code == http_client.CONFLICT:
raise exception.Duplicate(res.read())
elif status_code == http_client.BAD_REQUEST:
raise exception.Invalid(res.read())
elif status_code == http_client.MULTIPLE_CHOICES:
raise exception.MultipleChoices(body=res.read())
elif status_code == http_client.REQUEST_ENTITY_TOO_LARGE:
raise exception.LimitExceeded(retry=_retry(res),
body=res.read())
elif status_code == http_client.INTERNAL_SERVER_ERROR:
raise exception.ServerError()
elif status_code == http_client.SERVICE_UNAVAILABLE:
raise exception.ServiceUnavailable(retry=_retry(res))
else:
raise exception.UnexpectedStatus(status=status_code,
body=res.read())
except (socket.error, IOError) as e:
raise exception.ClientConnectionError(e)
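# --- Illustrative sketch, not part of the client above ---
# _chunkbody() frames each chunk as "<hex length>\r\n<data>\r\n" and terminates
# the body with a zero-length chunk, which is HTTP/1.1 chunked transfer
# encoding. The standalone generator below (a hypothetical name) shows the
# same framing without needing a live connection.
def _example_chunked_frames(chunks):
    """Yield the wire-format frames for an iterable of byte chunks."""
    for chunk in chunks:
        yield b'%x\r\n%s\r\n' % (len(chunk), chunk)
    yield b'0\r\n\r\n'  # terminating zero-length chunk

# b''.join(_example_chunked_frames([b'abc', b'de']))
# -> b'3\r\nabc\r\n2\r\nde\r\n0\r\n\r\n'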
def _seekable(self, body):
# pipes are not seekable, avoids sendfile() failure on e.g.
# cat /path/to/image | glance add ...
# or where add command is launched via popen
try:
os.lseek(body.fileno(), 0, os.SEEK_CUR)
return True
except OSError as e:
return (e.errno != errno.ESPIPE)
def _sendable(self, body):
return (SENDFILE_SUPPORTED and
hasattr(body, 'fileno') and
self._seekable(body) and
not self.use_ssl)
def _iterable(self, body):
return isinstance(body, collections.Iterable)
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a Webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
else:
return response.status
def _extract_params(self, actual_params, allowed_params):
"""
Extract a subset of keys from a dictionary. The filters key
will also be extracted, and each of its values will be returned
as an individual param.
:param actual_params: dict of keys to filter
:param allowed_params: list of keys that 'actual_params' will be
reduced to
:retval subset of 'params' dict
"""
try:
# expect 'filters' param to be a dict here
result = dict(actual_params.get('filters'))
except TypeError:
result = {}
for allowed_param in allowed_params:
if allowed_param in actual_params:
result[allowed_param] = actual_params[allowed_param]
return result
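# --- Illustrative sketch, not part of the client above ---
# Standalone version of the _extract_params() logic: the 'filters' sub-dict is
# flattened into the result and only keys named in allowed_params are copied
# through. The helper name and values are made up.
def _example_extract_params(actual_params, allowed_params):
    result = dict(actual_params.get('filters') or {})
    for allowed in allowed_params:
        if allowed in actual_params:
            result[allowed] = actual_params[allowed]
    return result

# _example_extract_params({'filters': {'status': 'active'}, 'limit': 10,
#                          'secret': 'x'}, ['limit', 'marker'])
# -> {'status': 'active', 'limit': 10}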
|
|
from __future__ import absolute_import
import responses
import os.path
from mock import patch
from sentry.models import Event, File, Release, ReleaseFile
from sentry.testutils import TestCase
BASE64_SOURCEMAP = 'data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiZ2VuZXJhdGVkLmpzIiwic291cmNlcyI6WyIvdGVzdC5qcyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiO0FBQUEiLCJzb3VyY2VzQ29udGVudCI6WyJjb25zb2xlLmxvZyhcImhlbGxvLCBXb3JsZCFcIikiXX0='
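# --- Illustrative sketch (not part of the original tests) ---
# BASE64_SOURCEMAP above is a base64 "data:" URI carrying an inline sourcemap;
# decoding it shows the fields the processor relies on (version, sources,
# mappings, sourcesContent). The helper name is hypothetical.
def _decode_inline_sourcemap(data_uri):
    import base64
    import json
    _, _, payload = data_uri.partition('base64,')
    return json.loads(base64.b64decode(payload).decode('utf-8'))

# _decode_inline_sourcemap(BASE64_SOURCEMAP)['sourcesContent']
# -> ['console.log("hello, World!")']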
def get_fixture_path(name):
return os.path.join(os.path.dirname(__file__), 'fixtures', name)
def load_fixture(name):
with open(get_fixture_path(name)) as fp:
return fp.read()
class JavascriptIntegrationTest(TestCase):
@patch('sentry.lang.javascript.processor.fetch_file')
def test_source_expansion(self, mock_fetch_file):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/foo.js',
'filename': 'foo.js',
'lineno': 4,
'colno': 0,
},
{
'abs_path': 'http://example.com/foo.js',
'filename': 'foo.js',
'lineno': 1,
'colno': 0,
},
],
},
}],
}
}
mock_fetch_file.return_value.body = '\n'.join('hello world')
resp = self._postWithHeader(data)
assert resp.status_code == 200
mock_fetch_file.assert_called_once_with(
'http://example.com/foo.js',
project=self.project,
release=None,
allow_scraping=True,
)
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == ['h', 'e', 'l']
assert frame.context_line == 'l'
assert frame.post_context == ['o', ' ', 'w', 'o', 'r']
frame = frame_list[1]
assert frame.pre_context is None
assert frame.context_line == 'h'
assert frame.post_context == ['e', 'l', 'l', 'o', ' ']
@patch('sentry.lang.javascript.processor.fetch_file')
@patch('sentry.lang.javascript.processor.discover_sourcemap')
def test_inlined_sources(self, mock_discover_sourcemap, mock_fetch_file):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/test.min.js',
'filename': 'test.js',
'lineno': 1,
'colno': 0,
},
],
},
}],
}
}
mock_discover_sourcemap.return_value = BASE64_SOURCEMAP
mock_fetch_file.return_value.url = 'http://example.com/test.min.js'
mock_fetch_file.return_value.body = '\n'.join('<generated source>')
resp = self._postWithHeader(data)
assert resp.status_code == 200
mock_fetch_file.assert_called_once_with(
'http://example.com/test.min.js',
project=self.project,
release=None,
allow_scraping=True,
)
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert not frame.pre_context
assert frame.context_line == 'console.log("hello, World!")'
assert not frame.post_context
assert frame.data['sourcemap'] == 'http://example.com/test.min.js'
@responses.activate
def test_sourcemap_source_expansion(self):
responses.add(responses.GET, 'http://example.com/file.min.js',
body=load_fixture('file.min.js'))
responses.add(responses.GET, 'http://example.com/file1.js',
body=load_fixture('file1.js'))
responses.add(responses.GET, 'http://example.com/file2.js',
body=load_fixture('file2.js'))
responses.add(responses.GET, 'http://example.com/file.sourcemap.js',
body=load_fixture('file.sourcemap.js'))
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/file.min.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
],
},
}],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
assert not event.data['errors']
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == [
'function add(a, b) {',
'\t"use strict";',
]
assert frame.context_line == '\treturn a + b;'
assert frame.post_context == ['}']
@responses.activate
def test_expansion_via_release_artifacts(self):
project = self.project
release = Release.objects.create(
project=project,
version='abc',
)
f1 = File.objects.create(
name='file.min.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f1.putfile(open(get_fixture_path('file.min.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f1.name),
release=release,
project=project,
file=f1,
)
f2 = File.objects.create(
name='file1.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f2.putfile(open(get_fixture_path('file1.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f2.name),
release=release,
project=project,
file=f2,
)
f3 = File.objects.create(
name='file2.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f3.putfile(open(get_fixture_path('file2.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f3.name),
release=release,
project=project,
file=f3,
)
f4 = File.objects.create(
name='file.sourcemap.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f4.putfile(open(get_fixture_path('file.sourcemap.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f4.name),
release=release,
project=project,
file=f4,
)
data = {
'message': 'hello',
'platform': 'javascript',
'release': 'abc',
'sentry.interfaces.Exception': {
'values': [{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/file.min.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
],
},
}],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
assert not event.data['errors']
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == [
'function add(a, b) {',
'\t"use strict";',
]
assert frame.context_line == '\treturn a + b;'
assert frame.post_context == ['}']
|
|
from StringIO import StringIO
from ast import literal_eval
from collections import defaultdict
from contextlib import contextmanager
import logging
import os
import re
from threading import Thread
import uuid
# noinspection PyUnresolvedReferences
from boto.sdb.domain import Domain
# noinspection PyUnresolvedReferences
from boto.s3.bucket import Bucket
# noinspection PyUnresolvedReferences
from boto.s3.connection import S3Connection
# noinspection PyUnresolvedReferences
from boto.sdb.connection import SDBConnection
from boto.sdb.item import Item
import boto.s3
from boto.exception import SDBResponseError, S3ResponseError
import itertools
import time
from toil.jobStores.abstractJobStore import AbstractJobStore, NoSuchJobException, \
ConcurrentFileModificationException, NoSuchFileException
from toil.batchJob import BatchJob
log = logging.getLogger( __name__ )
# FIXME: Command length is currently limited to 1024 characters
# NB: Number of messages per batchjob is limited to 256-x, 1024 bytes each, with x being the number of
# other attributes in the item
# FIXME: enforce SimpleDB limits early
class AWSJobStore( AbstractJobStore ):
"""
A batchjob store that uses Amazon's S3 for file storage and SimpleDB for storing batchjob info and
enforcing strong consistency on the S3 file storage. The schema in SimpleDB is as follows:
Jobs are stored in the "xyz.jobs" domain where xyz is the name prefix this batchjob store was
constructed with. Each item in that domain uses the batchjob store batchjob ID (jobStoreID) as the item
name. The command, memory and cpu fields of a batchjob will be stored as attributes. The messages
field of a batchjob will be stored as a multivalued attribute.
"""
# FIXME: Eliminate after consolidating behaviour with FileJobStore
resetJobInLoadState = True
"""Whether to reset the messages, remainingRetryCount and children attributes of a batchjob when
it is loaded by loadToilState."""
def fileExists(self, jobStoreFileID ):
return bool(self.versions.get_item(item_name=jobStoreFileID, consistent_read=True))
def jobs( self ):
for attempt in retry_sdb( ):
with attempt:
result = list( self.jobDomain.select(
query="select * from `{domain}` ".format( domain=self.jobDomain.name ),
consistent_read=True ) )
jobList = []
for jobItem in result:
yield AWSJob.fromItem(jobItem)
def create( self, command, memory, cpu, disk, updateID=None,
predecessorNumber=0 ):
jobStoreID = self._newJobID( )
log.debug( "Creating batchjob %s for '%s'",
jobStoreID, '<no command>' if command is None else command )
batchjob = AWSJob( jobStoreID=jobStoreID,
command=command, memory=memory, cpu=cpu, disk=disk,
remainingRetryCount=self._defaultTryCount( ), logJobStoreFileID=None,
updateID=updateID, predecessorNumber=predecessorNumber)
for attempt in retry_sdb( ):
with attempt:
assert self.jobDomain.put_attributes( item_name=jobStoreID,
attributes=batchjob.toItem( ) )
return batchjob
def __init__( self, region, namePrefix, config=None ):
log.debug( "Instantiating %s for region %s and name prefix '%s'",
self.__class__, region, namePrefix )
self.region = region
self.namePrefix = namePrefix
self.jobDomain = None
self.versions = None
self.files = None
self.stats = None
self.db = self._connectSimpleDB( )
self.s3 = self._connectS3( )
create = config is not None
self.jobDomain = self._getOrCreateDomain( 'jobs', create )
self.versions = self._getOrCreateDomain( 'versions', create )
self.files = self._getOrCreateBucket( 'files', create, versioning=True )
self.stats = self._getOrCreateBucket( 'stats', create, versioning=True )
super( AWSJobStore, self ).__init__( config=config )
def exists( self, jobStoreID ):
for attempt in retry_sdb( ):
with attempt:
return bool( self.jobDomain.get_attributes( item_name=jobStoreID,
attribute_name=[ ],
consistent_read=True ) )
def getPublicUrl( self, jobStoreFileID):
"""
For Amazon SimpleDB requests, use HTTP GET requests that are URLs with query strings.
http://awsdocs.s3.amazonaws.com/SDB/latest/sdb-dg.pdf
Create url, check if valid, return.
"""
key = self.files.get_key( key_name=jobStoreFileID)
return key.generate_url(expires_in=3600) # one hour
def getSharedPublicUrl(self, FileName):
jobStoreFileID = self._newFileID( FileName )
return self.getPublicUrl(jobStoreFileID)
def load( self, jobStoreID ):
# TODO: check if mentioning individual attributes is faster than using *
for attempt in retry_sdb( ):
with attempt:
result = list( self.jobDomain.select(
query="select * from `{domain}` "
"where itemName() = '{jobStoreID}'".format( domain=self.jobDomain.name,
jobStoreID=jobStoreID ),
consistent_read=True ) )
if len(result)!=1:
raise NoSuchJobException(jobStoreID)
batchjob = AWSJob.fromItem(result[0])
if batchjob is None:
raise NoSuchJobException( jobStoreID )
log.debug( "Loaded batchjob %s", jobStoreID )
return batchjob
def update( self, batchjob ):
log.debug( "Updating batchjob %s", batchjob.jobStoreID )
for attempt in retry_sdb( ):
with attempt:
assert self.jobDomain.put_attributes( item_name=batchjob.jobStoreID,
attributes=batchjob.toItem( ) )
def delete( self, jobStoreID ):
# remove batchjob and replace with jobStoreId.
log.debug( "Deleting batchjob %s", jobStoreID )
for attempt in retry_sdb( ):
with attempt:
self.jobDomain.delete_attributes( item_name=jobStoreID )
for attempt in retry_sdb( ):
with attempt:
items = list( self.versions.select(
query="select * from `%s` "
"where jobStoreID='%s'" % (self.versions.name, jobStoreID),
consistent_read=True ) )
if items:
log.debug( "Deleting %d file(s) associated with batchjob %s", len( items ), jobStoreID )
for attempt in retry_sdb( ):
with attempt:
self.versions.batch_delete_attributes( { item.name: None for item in items } )
for item in items:
if 'version' in item:
self.files.delete_key( key_name=item.name,
version_id=item[ 'version' ] )
else:
self.files.delete_key( key_name=item.name)
def writeFile( self, jobStoreID, localFilePath ):
jobStoreFileID = self._newFileID( )
firstVersion = self._upload( jobStoreFileID, localFilePath )
self._registerFile( jobStoreFileID, jobStoreID=jobStoreID, newVersion=firstVersion )
log.debug( "Wrote initial version %s of file %s for batchjob %s from path '%s'",
firstVersion, jobStoreFileID, jobStoreID, localFilePath )
return jobStoreFileID
@contextmanager
def writeFileStream( self, jobStoreID ):
jobStoreFileID = self._newFileID( )
with self._uploadStream( jobStoreFileID, self.files ) as (writable, key):
yield writable, jobStoreFileID
firstVersion = key.version_id
assert firstVersion is not None
self._registerFile( jobStoreFileID, jobStoreID=jobStoreID, newVersion=firstVersion )
log.debug( "Wrote initial version %s of file %s for batchjob %s",
firstVersion, jobStoreFileID, jobStoreID )
@contextmanager
def writeSharedFileStream( self, sharedFileName ):
assert self._validateSharedFileName( sharedFileName )
jobStoreFileID = self._newFileID( sharedFileName )
oldVersion = self._getFileVersion( jobStoreFileID )
with self._uploadStream( jobStoreFileID, self.files ) as (writable, key):
yield writable
newVersion = key.version_id
jobStoreId = str( self.sharedFileJobID ) if oldVersion is None else None
self._registerFile( jobStoreFileID,
jobStoreID=jobStoreId, oldVersion=oldVersion, newVersion=newVersion )
if oldVersion is None:
log.debug( "Wrote initial version %s of shared file %s (%s)",
newVersion, sharedFileName, jobStoreFileID )
else:
log.debug( "Wrote version %s of file %s (%s), replacing version %s",
newVersion, sharedFileName, jobStoreFileID, oldVersion )
def updateFile( self, jobStoreFileID, localFilePath ):
oldVersion = self._getFileVersion( jobStoreFileID )
newVersion = self._upload( jobStoreFileID, localFilePath )
self._registerFile( jobStoreFileID, oldVersion=oldVersion, newVersion=newVersion )
log.debug( "Wrote version %s of file %s from path '%s', replacing version %s",
newVersion, jobStoreFileID, localFilePath, oldVersion )
@contextmanager
def updateFileStream( self, jobStoreFileID ):
oldVersion = self._getFileVersion( jobStoreFileID )
with self._uploadStream( jobStoreFileID, self.files ) as (writable, key):
yield writable
newVersion = key.version_id
self._registerFile( jobStoreFileID, oldVersion=oldVersion, newVersion=newVersion )
log.debug( "Wrote version %s of file %s, replacing version %s",
newVersion, jobStoreFileID, oldVersion )
def readFile( self, jobStoreFileID, localFilePath ):
version = self._getFileVersion( jobStoreFileID )
if version is None: raise NoSuchFileException( jobStoreFileID )
log.debug( "Reading version %s of file %s to path '%s'",
version, jobStoreFileID, localFilePath )
self._download( jobStoreFileID, localFilePath, version )
@contextmanager
def readFileStream( self, jobStoreFileID ):
version = self._getFileVersion( jobStoreFileID )
if version is None: raise NoSuchFileException( jobStoreFileID )
log.debug( "Reading version %s of file %s", version, jobStoreFileID )
with self._downloadStream( jobStoreFileID, version, self.files ) as readable:
yield readable
@contextmanager
def readSharedFileStream( self, sharedFileName ):
assert self._validateSharedFileName( sharedFileName )
jobStoreFileID = self._newFileID( sharedFileName )
version = self._getFileVersion( jobStoreFileID )
if version is None: raise NoSuchFileException( jobStoreFileID )
log.debug( "Read version %s from shared file %s (%s)",
version, sharedFileName, jobStoreFileID )
with self._downloadStream( jobStoreFileID, version, self.files ) as readable:
yield readable
def deleteFile( self, jobStoreFileID ):
version, bucket = self._getFileVersionAndBucket( jobStoreFileID )
if bucket:
for attempt in retry_sdb( ):
with attempt:
if version:
self.versions.delete_attributes( jobStoreFileID,
expected_values=[ 'version', version ] )
else:
self.versions.delete_attributes( jobStoreFileID)
bucket.delete_key( key_name=jobStoreFileID, version_id=version )
if version:
log.debug( "Deleted version %s of file %s", version, jobStoreFileID )
else:
log.debug( "Deleted unversioned file %s", version, jobStoreFileID )
else:
log.debug( "File %s does not exist", jobStoreFileID)
def getEmptyFileStoreID( self, jobStoreID ):
jobStoreFileID = self._newFileID( )
self._registerFile( jobStoreFileID, jobStoreID=jobStoreID )
log.debug( "Registered empty file %s for batchjob %s", jobStoreFileID, jobStoreID )
return jobStoreFileID
def writeStatsAndLogging( self, statsAndLoggingString ):
jobStoreFileId = self._newFileID( )
with self._uploadStream( jobStoreFileId, self.stats, multipart=False ) as (writeable, key):
writeable.write( statsAndLoggingString )
firstVersion = key.version_id
self._registerFile( jobStoreFileId, bucketName='stats', newVersion=firstVersion )
def readStatsAndLogging( self, statsCallBackFn ):
itemsProcessed = 0
for attempt in retry_sdb( ):
with attempt:
items = list( self.versions.select(
query="select * from `%s` "
"where bucketName='stats'" % (self.versions.name,),
consistent_read=True ) )
for item in items:
with self._downloadStream( item.name, item[ 'version' ], self.stats ) as readable:
statsCallBackFn( readable )
self.deleteFile( item.name )
itemsProcessed += 1
return itemsProcessed
# Dots in bucket names should be avoided because bucket names are used in HTTPS bucket
# URLs where they may interfere with the certificate common name. We use a double
# hyphen as a separator instead.
bucketNameRe = re.compile( r'^[a-z0-9][a-z0-9-]+[a-z0-9]$' )
nameSeparator = '--'
@classmethod
def _parseArgs( cls, jobStoreString ):
region, namePrefix = jobStoreString.split( ':' )
# See http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html,
# reserve 10 characters for separator and suffixes
if not cls.bucketNameRe.match( namePrefix ):
raise ValueError( "Invalid name prefix '%s'. Name prefixes must contain only digits, "
"hyphens or lower-case letters and must not start or end in a "
"hyphen." % namePrefix )
# reserve 13 for separator and suffix
if len( namePrefix ) > 50:
raise ValueError( "Invalid name prefix '%s'. Name prefixes may not be longer than 50 "
"characters." % namePrefix )
if '--' in namePrefix:
raise ValueError( "Invalid name prefix '%s'. Name prefixes may not contain "
"%s." % (namePrefix, cls.nameSeparator) )
return region, namePrefix
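# --- Illustrative sketch, not part of AWSJobStore ---
# The job store locator parsed above is "<region>:<namePrefix>"; the prefix
# must match bucketNameRe, stay at or under 50 characters and avoid the '--'
# separator. A standalone check with made-up values:
import re as _re

def _example_parse_locator(job_store_string):
    region, name_prefix = job_store_string.split(':')
    if not _re.match(r'^[a-z0-9][a-z0-9-]+[a-z0-9]$', name_prefix):
        raise ValueError('invalid name prefix %r' % name_prefix)
    if len(name_prefix) > 50 or '--' in name_prefix:
        raise ValueError('invalid name prefix %r' % name_prefix)
    return region, name_prefix

# _example_parse_locator('us-west-2:my-toil-run') -> ('us-west-2', 'my-toil-run')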
def _connectSimpleDB( self ):
"""
:rtype: SDBConnection
"""
db = boto.sdb.connect_to_region( self.region )
if db is None:
raise ValueError( "Could not connect to SimpleDB. Make sure '%s' is a valid SimpleDB "
"region." % self.region )
assert db is not None
return db
def _connectS3( self ):
"""
:rtype: S3Connection
"""
s3 = boto.s3.connect_to_region( self.region )
if s3 is None:
raise ValueError( "Could not connect to S3. Make sure '%s' is a valid S3 region." %
self.region )
return s3
def _getOrCreateBucket( self, bucket_name, create=False, versioning=False ):
"""
:rtype: Bucket
"""
bucket_name = self.namePrefix + self.nameSeparator + bucket_name
assert self.bucketNameRe.match( bucket_name )
assert 3 <= len( bucket_name ) <= 63
try:
bucket = self.s3.get_bucket( bucket_name, validate=True )
assert versioning is self.__getBucketVersioning( bucket )
return bucket
except S3ResponseError as e:
if e.error_code == 'NoSuchBucket' and create:
bucket = self.s3.create_bucket( bucket_name, location=self.region )
if versioning:
bucket.configure_versioning( versioning )
return bucket
else:
raise
def _getOrCreateDomain( self, domain_name, create=False ):
"""
:rtype: Domain
"""
domain_name = self.namePrefix + self.nameSeparator + domain_name
for i in itertools.count( ):
try:
return self.db.get_domain( domain_name )
except SDBResponseError as e:
if e.error_code == 'NoSuchDomain':
if i == 0 and create:
self.db.create_domain( domain_name )
else:
log.warn( "Creation of '%s' still pending, retrying in 5s" % domain_name )
time.sleep( 5 )
def _newJobID( self ):
return str( uuid.uuid4( ) )
# A dummy batchjob ID under which all shared files are stored.
sharedFileJobID = uuid.UUID( '891f7db6-e4d9-4221-a58e-ab6cc4395f94' )
def _newFileID( self, sharedFileName=None ):
if sharedFileName is None:
return str( uuid.uuid4( ) )
else:
return str( uuid.uuid5( self.sharedFileJobID, str(sharedFileName) ) )
def _getFileVersionAndBucket( self, jobStoreFileID ):
"""
:rtype: tuple(str version, AWS bucket)
"""
for attempt in retry_sdb( ):
with attempt:
item = self.versions.get_attributes( item_name=jobStoreFileID,
attribute_name=[ 'version', 'bucketName' ],
consistent_read=True )
bucketName = item.get( 'bucketName', None )
if bucketName is None:
return None, None
else:
return item.get( 'version', None ), getattr( self, bucketName )
def _getFileVersion( self, jobStoreFileID, expectedBucket=None ):
version, bucket = self._getFileVersionAndBucket( jobStoreFileID )
if bucket is None:
assert version is None
else:
if expectedBucket is None:
expectedBucket = self.files
assert bucket is expectedBucket
return version
_s3_part_size = 50 * 1024 * 1024
def _upload( self, jobStoreFileID, localFilePath ):
file_size, file_time = self._fileSizeAndTime( localFilePath )
if file_size <= self._s3_part_size:
key = self.files.new_key( key_name=jobStoreFileID )
key.name = jobStoreFileID
key.set_contents_from_filename( localFilePath )
version = key.version_id
else:
with open( localFilePath, 'rb' ) as f:
upload = self.files.initiate_multipart_upload( key_name=jobStoreFileID )
try:
start = 0
part_num = itertools.count( )
while start < file_size:
end = min( start + self._s3_part_size, file_size )
assert f.tell( ) == start
upload.upload_part_from_file( fp=f,
part_num=next( part_num ) + 1,
size=end - start )
start = end
assert f.tell( ) == file_size == start
except:
upload.cancel_upload( )
raise
else:
version = upload.complete_upload( ).version_id
key = self.files.get_key( jobStoreFileID )
assert key.size == file_size
assert self._fileSizeAndTime( localFilePath ) == (file_size, file_time) #why do this? No one can touch the file while it is uploaded?
return version
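# --- Illustrative sketch, not part of AWSJobStore ---
# _upload() switches to a multipart upload once the file exceeds _s3_part_size
# (50 MiB) and walks the file in fixed-size windows. The arithmetic in
# isolation, with a hypothetical helper:
def _example_part_ranges(file_size, part_size=50 * 1024 * 1024):
    """Yield (part_num, start, end) tuples; S3 part numbers are 1-based."""
    start, part_num = 0, 1
    while start < file_size:
        end = min(start + part_size, file_size)
        yield part_num, start, end
        start, part_num = end, part_num + 1

# list(_example_part_ranges(120 * 1024 * 1024))
# -> [(1, 0, 52428800), (2, 52428800, 104857600), (3, 104857600, 125829120)]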
@contextmanager
def _uploadStream( self, jobStoreFileID, bucket, multipart=True ):
key = bucket.new_key( key_name=jobStoreFileID )
assert key.version_id is None
readable_fh, writable_fh = os.pipe( )
with os.fdopen( readable_fh, 'r' ) as readable:
with os.fdopen( writable_fh, 'w' ) as writable:
def reader( ):
try:
upload = bucket.initiate_multipart_upload( key_name=jobStoreFileID )
try:
for part_num in itertools.count( ):
# FIXME: Consider using a key.set_contents_from_stream and rip ...
# FIXME: ... the query_args logic from upload_part_from_file in ...
# FIXME: ... in MultipartUpload. Possible downside is that ...
# FIXME: ... implicit retries won't work.
buf = readable.read( self._s3_part_size )
# There must be at least one part, even if the file is empty.
if len( buf ) == 0 and part_num > 0: break
upload.upload_part_from_file( fp=StringIO( buf ),
# S3 part numbers are 1-based
part_num=part_num + 1 )
if len( buf ) == 0: break
except:
upload.cancel_upload( )
raise
else:
key.version_id = upload.complete_upload( ).version_id
except:
log.exception( 'Exception in reader thread' )
def simpleReader( ):
log.debug( "Using single part upload" )
try:
buf = StringIO( readable.read( ) )
assert key.set_contents_from_file( fp=buf ) == buf.len
except:
log.exception( "Exception in simple reader thread" )
thread = Thread( target=reader if multipart else simpleReader )
thread.start( )
# Yield the key now with version_id unset. When reader() returns
# key.version_id will be set.
yield writable, key
# The writable is now closed. This will send EOF to the readable and cause that
# thread to finish.
thread.join( )
assert key.version_id is not None
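# --- Illustrative sketch, not part of AWSJobStore ---
# _uploadStream() and _downloadStream() share one pattern: an os.pipe() pair
# with a background thread on one end and the caller on the other, where
# closing the writable end signals EOF. A minimal, self-contained version of
# that pattern (no S3 involved, hypothetical helper name; uses the os and
# Thread imports at the top of this module):
def _example_pipe_stream(chunks):
    """Feed `chunks` through a pipe from a writer thread and return the bytes read."""
    readable_fh, writable_fh = os.pipe()
    with os.fdopen(readable_fh, 'rb') as readable:
        with os.fdopen(writable_fh, 'wb') as writable:
            def writer():
                for chunk in chunks:
                    writable.write(chunk)
                writable.close()  # EOF for the reader, as in the code above
            thread = Thread(target=writer)
            thread.start()
            data = readable.read()
        thread.join()
    return data

# _example_pipe_stream([b'hello ', b'world']) -> b'hello world'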
def _download( self, jobStoreFileID, localFilePath, version ):
key = self.files.get_key( jobStoreFileID, validate=False )
key.get_contents_to_filename( localFilePath, version_id=version )
@contextmanager
def _downloadStream( self, jobStoreFileID, version, bucket ):
key = bucket.get_key( jobStoreFileID, validate=False )
readable_fh, writable_fh = os.pipe( )
with os.fdopen( readable_fh, 'r' ) as readable:
with os.fdopen( writable_fh, 'w' ) as writable:
def writer( ):
key.get_contents_to_file( writable, version_id=version )
# This close() will send EOF to the reading end and ultimately cause the
# yield to return. It also makes the implicit .close() done by the enclosing
# "with" context redundant, but that should be OK since .close() on file
# objects is idempotent.
writable.close( )
thread = Thread( target=writer )
thread.start( )
yield readable
thread.join( )
def _registerFile( self, jobStoreFileID,
bucketName='files', jobStoreID=None, newVersion=None, oldVersion=None ):
"""
Register a file in the store
:param jobStoreFileID: the file's ID, mandatory
:param bucketName: the name of the S3 bucket the file was placed in
:param jobStoreID: the ID of the batchjob owning the file, only allowed for first version of
file or when file is registered without content
:param newVersion: the file's new version or None if the file is to be registered without
content, in which case jobStoreId must be passed
:param oldVersion: the expected previous version of the file or None if newVersion is the
first version or file is registered without content
"""
# Must pass either jobStoreID or newVersion, or both
assert jobStoreID is not None or newVersion is not None
# Must pass newVersion if passing oldVersion
assert oldVersion is None or newVersion is not None
attributes = dict( bucketName=bucketName )
if newVersion is not None:
attributes[ 'version' ] = newVersion
if jobStoreID is not None:
attributes[ 'jobStoreID' ] = jobStoreID
# False stands for absence
expected = [ 'version', False if oldVersion is None else oldVersion ]
try:
for attempt in retry_sdb( ):
with attempt:
assert self.versions.put_attributes( item_name=jobStoreFileID,
attributes=attributes,
expected_value=expected )
if oldVersion is not None:
bucket = getattr( self, bucketName )
bucket.delete_key( jobStoreFileID, version_id=oldVersion )
except SDBResponseError as e:
if e.error_code == 'ConditionalCheckFailed':
raise ConcurrentFileModificationException( jobStoreFileID )
else:
raise
def _fileSizeAndTime( self, localFilePath ):
file_stat = os.stat( localFilePath )
file_size, file_time = file_stat.st_size, file_stat.st_mtime
return file_size, file_time
versionings = dict( Enabled=True, Disabled=False, Suspended=None )
def __getBucketVersioning( self, bucket ):
"""
A valuable lesson in how to feck up a simple tri-state boolean.
For newly created buckets get_versioning_status returns None. We map that to False.
TBD: This may actually be a result of eventual consistency
Otherwise, the 'Versioning' entry in the dictionary returned by get_versioning_status can
be 'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False
respectively. Calling configure_versioning with False on a bucket will cause
get_versioning_status to then return 'Suspended' for some reason.
"""
status = bucket.get_versioning_status( )
return bool( status ) and self.versionings[ status[ 'Versioning' ] ]
def deleteJobStore( self ):
for bucket in (self.files, self.stats):
if bucket is not None:
for upload in bucket.list_multipart_uploads( ):
upload.cancel_upload( )
if self.__getBucketVersioning( bucket ) in (True, None):
for key in list( bucket.list_versions( ) ):
bucket.delete_key( key.name, version_id=key.version_id )
else:
for key in list( bucket.list( ) ):
key.delete( )
bucket.delete( )
for domain in (self.versions, self.jobDomain):
if domain is not None:
domain.delete( )
# Boto converts all attribute values to strings by default, so an attribute value of None would
# becomes 'None' in SimpleDB. To truly represent attribute values of None, we'd have to always
# call delete_attributes in addition to put_attributes but there is no way to do that atomically.
# Instead we map None to the empty string and vice versa. The same applies to empty iterables.
# The empty iterable is a no-op for put_attributes, so we map that to '' instead. This means that
# we can't serialize [''] or '' because the former would be deserialized as [] and the latter as
# None.
def toNoneable( v ):
return v if v else None
def fromNoneable( v ):
assert v != ""
return '' if v is None else v
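# --- Illustrative sketch (hypothetical helper, same module) ---
# The None <-> '' mapping described in the comment block above, round-tripped
# through the two converters:
def _example_noneable_roundtrip():
    assert fromNoneable(None) == ''
    assert toNoneable(fromNoneable(None)) is None
    assert fromNoneable('x') == 'x' and toNoneable('x') == 'x'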
sort_prefix_length = 3
def toSet( vs ):
"""
:param vs: list[str] | str
:return: set(str) | set()
Lists returned by SimpleDB are not guaranteed to be in their original order, but because we
are converting them to sets, the loss of order is not a problem.
>>> toSet(["x", "y", "z"])
set(['y', 'x', 'z'])
Instead of a set, a single String can also be returned by SimpleDB.
>>> toSet("x")
set(['x'])
An empty set is serialized as ""
>>> toSet("")
set([])
"""
return set(vs) if vs else set()
def fromSet( vs ):
"""
:type vs: set(str)
:rtype str|list[str]
Empty set becomes empty string
>>> fromSet(set())
''
Singleton set becomes its sole element
>>> fromSet({'x'})
'x'
Set elements are unordered, so sort_prefixes used in fromList are not needed here.
>>> fromSet({'x','y'})
['y', 'x']
Only sets with non-empty strings are allowed
>>> fromSet(set(['']))
Traceback (most recent call last):
...
AssertionError
>>> fromSet({'x',''})
Traceback (most recent call last):
...
AssertionError
>>> fromSet({'x',1})
Traceback (most recent call last):
...
AssertionError
"""
if len( vs ) == 0:
return ""
elif len( vs ) == 1:
v = vs.pop()
assert isinstance( v, basestring ) and v
return v
else:
assert len( vs ) <= 256
assert all( isinstance( v, basestring ) and v for v in vs )
return list(vs)
def toList( vs ):
"""
:param vs: list[str] | str
:return: list[str] | []
Lists are not guaranteed to be in their original order, so they are sorted based on a prefixed string.
>>> toList(["000x", "001y", "002z"])
['x', 'y', 'z']
Instead of a list of length 1, a single string will be returned by SimpleDB.
A single element can only have one order, so there is no need to sort.
>>> toList("x")
['x']
An empty list is serialized as ""
>>> toList("")
[]
"""
if isinstance( vs, basestring ):
return [ vs ] if vs else [ ]
else:
return [ v[ sort_prefix_length: ] for v in sorted( vs ) ]
def fromList( vs ):
"""
:type vs: list[str]
:rtype str|list[str]
An empty list becomes the empty string
>>> fromList([])
''
Singleton list becomes its sole element
>>> fromList(['x'])
'x'
List elements are prefixed with their position because lists don't retain their order in SimpleDB
>>> fromList(['x','y'])
['000x', '001y']
Only lists with non-empty strings are allowed
>>> fromList([''])
Traceback (most recent call last):
...
AssertionError
>>> fromList(['x',''])
Traceback (most recent call last):
...
AssertionError
>>> fromList(['x',1])
Traceback (most recent call last):
...
AssertionError
"""
if len( vs ) == 0:
return ''
elif len( vs ) == 1:
v = vs[ 0 ]
assert isinstance( v, basestring ) and v
return v
else:
assert len( vs ) <= 256
assert all( isinstance( v, basestring ) and v for v in vs )
return [ str( i ).zfill( sort_prefix_length ) + v for i, v in enumerate( vs ) ]
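# --- Illustrative sketch (hypothetical helper, same module) ---
# fromList() prefixes each element with its zero-padded position so that
# toList() can restore the original order after SimpleDB scrambles a
# multi-valued attribute:
def _example_list_roundtrip():
    encoded = fromList(['x', 'y', 'z'])            # ['000x', '001y', '002z']
    assert toList(sorted(encoded, reverse=True)) == ['x', 'y', 'z']
    assert toList(fromList(['only'])) == ['only']  # singletons stay plain strings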
def passThrough( v ): return v
def skip( _ ): return None
class AWSJob( BatchJob ):
"""
A Batchjob that can be converted to and from a SimpleDB Item
"""
fromItemTransform = defaultdict( lambda: passThrough,
predecessorNumber=int,
memory=float,
disk=float,
cpu=float,
updateID=str,
command=toNoneable,
stack=lambda v:map( literal_eval, toList( v )),
jobsToDelete=toList,
predecessorsFinished=toSet,
remainingRetryCount=int,
logJobStoreFileID=toNoneable )
@classmethod
def fromItem( cls, item, jobStoreID=None ):
"""
:type item: Item
:rtype: AWSJob
"""
if jobStoreID is None: jobStoreID = item.name
try:
del item[ 'parentJobStoreID' ]
except KeyError:
pass
item = { k: cls.fromItemTransform[ k ]( v ) for k, v in item.iteritems( ) }
return cls( jobStoreID=jobStoreID, **item )
toItemTransform = defaultdict( lambda: passThrough,
command=fromNoneable,
jobStoreID=skip,
updateID=str,
children=skip,
stack=lambda v: fromList( map( repr, v ) ),
logJobStoreFileID=fromNoneable,
predecessorsFinished=fromSet,
jobsToDelete=fromList ,
predecessorNumber=str,
remainingRetryCount=str)
def toItem( self, parentJobStoreID=None ):
"""
:rtype: Item
"""
item = self.toDict( )
if parentJobStoreID is not None:
item[ 'parentJobStoreID' ] = parentJobStoreID
item = ((k, self.toItemTransform[ k ]( v )) for k, v in item.iteritems( ))
return { k: v for k, v in item if v is not None }
# FIXME: This was lifted from cgcloud-lib where we use it for EC2 retries. The only difference
# FIXME: ... between that code and this is the name of the exception.
a_short_time = 5
a_long_time = 60 * 60
def no_such_domain( e ):
return e.error_code.endswith( 'NoSuchDomain' )
def true( _ ):
return True
def false( _ ):
return False
def retry_sdb( retry_after=a_short_time,
retry_for=10 * a_short_time,
retry_while=no_such_domain ):
"""
Retry an SDB operation while the failure matches a given predicate and until a given timeout
expires, waiting a given amount of time in between attempts. This function is a generator
that yields contextmanagers. See doctests below for example usage.
:param retry_after: the delay in seconds between attempts
:param retry_for: the timeout in seconds.
:param retry_while: a callable with one argument, an instance of SDBResponseError, returning
True if another attempt should be made or False otherwise
:return: a generator yielding contextmanagers
Retry for a limited amount of time:
>>> i = 0
>>> for attempt in retry_sdb( retry_after=0, retry_for=.1, retry_while=true ):
... with attempt:
... i += 1
... raise SDBResponseError( 'foo', 'bar' )
Traceback (most recent call last):
...
SDBResponseError: SDBResponseError: foo bar
<BLANKLINE>
>>> i > 1
True
Do exactly one attempt:
>>> i = 0
>>> for attempt in retry_sdb( retry_for=0 ):
... with attempt:
... i += 1
... raise SDBResponseError( 'foo', 'bar' )
Traceback (most recent call last):
...
SDBResponseError: SDBResponseError: foo bar
<BLANKLINE>
>>> i
1
Don't retry on success
>>> i = 0
>>> for attempt in retry_sdb( retry_after=0, retry_for=.1, retry_while=true ):
... with attempt:
... i += 1
>>> i
1
Don't retry when the retry_while predicate returns False
>>> i = 0
>>> for attempt in retry_sdb( retry_after=0, retry_for=.1, retry_while=false ):
... with attempt:
... i += 1
... raise SDBResponseError( 'foo', 'bar' )
Traceback (most recent call last):
...
SDBResponseError: SDBResponseError: foo bar
<BLANKLINE>
>>> i
1
"""
if retry_for > 0:
go = [ None ]
@contextmanager
def repeated_attempt( ):
try:
yield
except SDBResponseError as e:
if time.time( ) + retry_after < expiration:
if retry_while( e ):
log.info( '... got %s, trying again in %is ...', e.error_code, retry_after )
time.sleep( retry_after )
else:
log.info( 'Exception failed predicate, giving up.' )
raise
else:
log.info( 'Retry timeout expired, giving up.' )
raise
else:
go.pop( )
expiration = time.time( ) + retry_for
while go:
yield repeated_attempt( )
else:
@contextmanager
def single_attempt( ):
yield
yield single_attempt( )
|
|
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from lxml import etree
import mock
import os_vif
from os_vif import exception as osv_exception
from os_vif import objects as osv_objects
from os_vif.objects import fields as osv_fields
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.fixtures import libvirt as fakelibvirt
from nova.tests.unit.virt import fakelibosinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
CONF = cfg.CONF
class LibvirtVifTestCase(test.NoDBTestCase):
gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
subnet_bridge_4 = network_model.Subnet(
cidr='101.168.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
routes=None,
dhcp_server='191.168.1.1')
gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gateway_bridge_6,
ips=None,
routes=None)
network_bridge = network_model.Network(id=uuids.network,
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface='eth0',
vlan=99, mtu=9000)
vif_bridge = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_BRIDGE,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_bridge_neutron = network_model.Network(id=uuids.network,
bridge=None,
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
network_ovs = network_model.Network(id=uuids.network,
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface=None,
vlan=99, mtu=1000)
network_ivs = network_model.Network(id=uuids.network,
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface=None,
vlan=99)
vif_agilio_ovs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
details={'port_filter': False},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
vif_agilio_ovs_direct = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=uuids.ovs,
devname='tap-xxx-yyy-zzz',
profile={'pci_slot': '0000:0a:00.1'})
vif_agilio_ovs_forwarder = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
vnic_type=network_model.VNIC_TYPE_VIRTIO_FORWARDER,
profile={'pci_slot': '0000:0a:00.1'},
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
ovs_interfaceid=uuids.ovs, mtu=1500)
vif_ovs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': False},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
vif_ovs_direct = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
vnic_type=network_model.VNIC_TYPE_DIRECT,
profile={'pci_slot': '0000:0a:00.1'},
type=network_model.VIF_TYPE_OVS,
details={'port_filter': False},
ovs_interfaceid=uuids.ovs)
vif_ovs_filter_cap = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs,
delegate_create=True)
vif_ovs_legacy = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid=None)
vif_ivs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
vif_none = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_8021 = network_model.Network(id=uuids.network,
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0',
vlan=99)
vif_8021qbh = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBH,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_PROFILEID:
'MyPortProfile'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb_arq = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb_trusted = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1',
'trusted': 'True'})
vif_hostdev_physical = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HOSTDEV,
vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL,
ovs_interfaceid=None,
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb_macvtap = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_MACVTAP,
ovs_interfaceid=None,
details={network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_8021qbg = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBG,
ovs_interfaceid=None,
qbg_params=network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff"))
network_midonet = network_model.Network(id=uuids.network,
label=None,
bridge=None,
subnets=[subnet_bridge_4],
interface='eth0')
network_vrouter = network_model.Network(id=uuids.network,
label=None,
bridge=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
interface='eth0')
vif_vrouter = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_vrouter,
type=network_model.VIF_TYPE_VROUTER,
devname='tap-xxx-yyy-zzz')
vif_vrouter_direct = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_vrouter,
type=network_model.VIF_TYPE_VROUTER,
vnic_type=network_model.VNIC_TYPE_DIRECT,
profile={'pci_slot': '0000:0a:00.1'},
devname='tap-xxx-yyy-zzz')
vif_vrouter_forwarder = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_vrouter,
type=network_model.VIF_TYPE_VROUTER,
vnic_type=network_model.VNIC_TYPE_VIRTIO_FORWARDER,
profile={'pci_slot': '0000:0a:00.1'},
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'server',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_VROUTER_PLUG: True})
vif_contrail_vrouter = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_vrouter,
type=network_model.VIF_TYPE_VHOSTUSER,
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'server',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_VROUTER_PLUG: True})
vif_ib_hostdev = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_IB_HOSTDEV,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_midonet = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_midonet,
type=network_model.VIF_TYPE_MIDONET,
devname='tap-xxx-yyy-zzz')
vif_tap = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
type=network_model.VIF_TYPE_TAP,
devname='tap-xxx-yyy-zzz')
vif_iovisor = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_IOVISOR,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
vif_vhostuser = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/vif-xxx-yyy-zzz'
})
vif_vhostuser_ovs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
ovs_interfaceid=uuids.ovs, mtu=1500)
vif_vhostuser_no_path = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client'})
vif_macvtap_vlan = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={
network_model.VIF_DETAILS_VLAN: 1,
network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0.1',
network_model.VIF_DETAILS_MACVTAP_MODE: 'vepa'})
vif_macvtap_flat = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={
network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_MODE: 'bridge'})
vif_macvtap_exception = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP)
instance = objects.Instance(id=1,
uuid='f0000000-0000-0000-0000-000000000001',
project_id=723)
flavor_1vcpu = objects.Flavor(vcpus=1, memory=512, root_gb=1)
flavor_2vcpu = objects.Flavor(vcpus=2, memory=512, root_gb=1)
bandwidth = {
'quota:vif_inbound_peak': '200',
'quota:vif_outbound_peak': '20',
'quota:vif_inbound_average': '100',
'quota:vif_outbound_average': '10',
'quota:vif_inbound_burst': '300',
'quota:vif_outbound_burst': '30'
}
def setup_os_vif_objects(self):
self.os_vif_network = osv_objects.network.Network(
id="b82c1929-051e-481d-8110-4669916c7915",
label="Demo Net",
subnets=osv_objects.subnet.SubnetList(
objects=[]),
mtu=9000)
self.os_vif_bridge = osv_objects.vif.VIFBridge(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br100",
has_traffic_filtering=False,
network=self.os_vif_network)
self.os_vif_ovs_prof = osv_objects.vif.VIFPortProfileOpenVSwitch(
interface_id="07bd6cea-fb37-4594-b769-90fc51854ee9",
profile_id="fishfood",
create_port=True)
self.os_vif_repr_prof = osv_objects.vif.VIFPortProfileOVSRepresentor(
interface_id="07bd6cea-fb37-4594-b769-90fc51854ee9",
profile_id="fishfood",
representor_name='nicdc065497-3c',
representor_address='0000:0a:00.1')
self.os_vif_agilio_ovs = osv_objects.vif.VIFOpenVSwitch(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_agilio_forwarder = osv_objects.vif.VIFVHostUser(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
path='/var/run/openvswitch/vhudc065497-3c',
mode='client',
port_profile=self.os_vif_repr_prof,
network=self.os_vif_network)
self.os_vif_agilio_direct = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.ETHERNET,
dev_address='0000:0a:00.1',
port_profile=self.os_vif_repr_prof,
network=self.os_vif_network)
self.os_vif_ovs = osv_objects.vif.VIFOpenVSwitch(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
unplugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_ovs_hybrid = osv_objects.vif.VIFBridge(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
unplugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
has_traffic_filtering=False,
network=self.os_vif_network)
self.os_vif_vhostuser = osv_objects.vif.VIFVHostUser(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="openvswitch",
vif_name="vhudc065497-3c",
path='/var/run/openvswitch/vhudc065497-3c',
mode='client',
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_hostdevice_ethernet = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.ETHERNET,
dev_address='0000:0a:00.1',
network=self.os_vif_network)
self.os_vif_hostdevice_generic = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.GENERIC,
dev_address='0000:0a:00.1',
network=self.os_vif_network)
self.os_vif_inst_info = osv_objects.instance_info.InstanceInfo(
uuid="d5b1090c-9e00-4fa4-9504-4b1494857970",
name="instance-000004da",
project_id="2f37d7f6-e51a-4a1f-8b6e-b0917ffc8390")
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.useFixture(nova_fixtures.LibvirtFixture(stub_os_vif=False))
# os_vif.initialize is typically done in nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
# multiqueue configuration is host OS specific
_a = mock.patch('os.uname')
self.mock_uname = _a.start()
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.10.13-200-generic', '', 'x86_64')
self.addCleanup(_a.stop)
def _get_node(self, xml):
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
return ret[0]
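# --- Illustrative sketch, not part of the test class ---
# _get_node() pulls the single <interface> element out of the generated guest
# XML. A standalone version against a made-up snippet, using the lxml etree
# imported at the top of this module:
def _example_find_interface(xml):
    doc = etree.fromstring(xml)
    nodes = doc.findall('./devices/interface')
    assert len(nodes) == 1
    return nodes[0]

# _example_find_interface(
#     "<domain><devices><interface type='bridge'>"
#     "<mac address='ca:fe:de:ad:be:ef'/><source bridge='br0'/>"
#     "</interface></devices></domain>").find('mac').get('address')
# -> 'ca:fe:de:ad:be:ef'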
def _assertMacEquals(self, node, vif):
mac = node.find("mac").get("address")
self.assertEqual(mac, vif['address'])
def _assertTypeEquals(self, node, type, attr, source, br_want):
self.assertEqual(node.get("type"), type)
br_name = node.find(attr).get(source)
self.assertEqual(br_name, br_want)
def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
br_want=None):
self._assertTypeEquals(node, type, attr, source, br_want)
self._assertMacEquals(node, vif)
def _assertModel(self, xml, model_want=None, driver_want=None):
node = self._get_node(xml)
if model_want is None:
ret = node.findall("model")
self.assertEqual(len(ret), 0)
else:
model = node.find("model").get("type")
self.assertEqual(model, model_want)
if driver_want is None:
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
else:
driver = node.find("driver").get("name")
self.assertEqual(driver, driver_want)
def _assertTypeAndPciEquals(self, node, type, vif):
self.assertEqual(node.get("type"), type)
self._assertPciEqual(node, vif, type="pci")
def _assertPciEqual(self, node, vif, type=None):
address = node.find("source").find("address")
if type:
addr_type = address.get("type")
self.assertEqual(type, addr_type)
pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
'domain': address.get("domain")[2:],
'bus': address.get("bus")[2:],
'slot': address.get("slot")[2:],
'func': address.get("function")[2:]}
pci_slot_want = vif['profile']['pci_slot']
self.assertEqual(pci_slot, pci_slot_want)
def _assertQueueSizeEquals(self, node, rx_want, tx_want):
rx_queue_size = node.find("driver").get("rx_queue_size")
tx_queue_size = node.find("driver").get("tx_queue_size")
self.assertEqual(rx_queue_size, rx_want)
self.assertEqual(tx_queue_size, tx_want)
def _assertXmlEqual(self, expectedXmlstr, actualXmlstr):
if not isinstance(actualXmlstr, str):
actualXmlstr = etree.tostring(actualXmlstr, encoding='unicode',
pretty_print=True)
self.assertXmlEqual(expectedXmlstr, actualXmlstr)
def _get_conf(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
return conf
def _get_instance_xml(self, driver, vif, image_meta=None, flavor=None):
if flavor is None:
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=1,
root_gb=0,
ephemeral_gb=0,
swap=0,
extra_specs=dict(self.bandwidth),
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
if image_meta is None:
image_meta = objects.ImageMeta.from_dict({})
conf = self._get_conf()
nic = driver.get_config(self.instance, vif, image_meta,
flavor, CONF.libvirt.virt_type)
# TODO(stephenfin): There doesn't appear to be any reason we should do
# this: just return 'nic.to_xml()' and remove '_get_node'
conf.add_device(nic)
return conf.to_xml()
def _test_virtio_multiqueue(self, vcpus, want_queues):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=vcpus,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0,
extra_specs={})
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta, flavor)
node = self._get_node(xml)
driver = node.find("driver").get("name")
self.assertEqual(driver, 'vhost')
queues = node.find("driver").get("queues")
self.assertEqual(queues, want_queues)
def test_virtio_multiqueue(self):
self._test_virtio_multiqueue(4, '4')
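    # The kernel-version tests below mirror the assertions they make: a 2.6
    # kernel falls back to a single virtio queue, a 3.x kernel caps the queue
    # count at 8, a 4.x or newer kernel allows one queue per vCPU, and the
    # [libvirt]/max_queues option bounds the result in every case.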
def test_virtio_multiqueue_in_kernel_2(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '2.6.32-21-generic', '', '')
self._test_virtio_multiqueue(10, '1')
def test_virtio_multiqueue_in_kernel_3(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '3.19.0-47-generic', '', '')
self._test_virtio_multiqueue(10, '8')
def test_virtio_multiqueue_in_kernel_4(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '4.2.0-35-generic', '', '')
self._test_virtio_multiqueue(10, '10')
def test_virtio_multiqueue_in_kernel_2_max_queues(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '2.6.32-21-generic', '', '')
self.flags(max_queues=2, group='libvirt')
self._test_virtio_multiqueue(10, '2')
def test_virtio_multiqueue_in_kernel_3_max_queues(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '3.19.0-47-generic', '', '')
self.flags(max_queues=2, group='libvirt')
self._test_virtio_multiqueue(10, '2')
def test_virtio_multiqueue_in_kernel_4_max_queues(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '4.2.0-35-generic', '', '')
self.flags(max_queues=2, group='libvirt')
self._test_virtio_multiqueue(10, '2')
def test_vhostuser_os_vif_multiqueue(self):
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
flavor = objects.Flavor(
id=2,
name='m1.small',
memory_mb=128,
vcpus=4,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
disabled=False,
extra_specs={})
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
flavor, 'kvm', 'normal')
self.assertEqual(4, conf.vhost_queues)
self.assertEqual('vhost', conf.driver_name)
d._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser, conf)
self.assertEqual(4, conf.vhost_queues)
self.assertIsNone(conf.driver_name)
def _test_virtio_config_queue_sizes(
self, vnic_type=network_model.VNIC_TYPE_NORMAL):
self.flags(rx_queue_size=512, group='libvirt')
self.flags(tx_queue_size=1024, group='libvirt')
v = vif.LibvirtGenericVIFDriver()
conf = v.get_base_config(
None, 'ca:fe:de:ad:be:ef', {},
objects.Flavor(vcpus=2), 'kvm', vnic_type)
return v, conf
def test_virtio_vhost_queue_sizes(self):
_, conf = self._test_virtio_config_queue_sizes()
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
def test_virtio_vhost_queue_sizes_vnic_type_direct(self):
_, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_DIRECT)
self.assertIsNone(conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
def test_virtio_vhost_queue_sizes_vnic_type_direct_physical(self):
_, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL)
self.assertIsNone(conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
def test_virtio_vhost_queue_sizes_vnic_type_macvtap(self):
_, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_MACVTAP)
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
def test_virtio_vhost_queue_sizes_vnic_type_virtio_forwarder(self):
_, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_VIRTIO_FORWARDER)
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
def test_virtio_vhostuser_osvif_queue_sizes(self):
v, conf = self._test_virtio_config_queue_sizes()
v._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser, conf)
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertEqual(1024, conf.vhost_tx_queue_size)
def test_multiple_nics(self):
conf = self._get_conf()
# Tests multiple nic configuration and that target_dev is
# set for each
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(use_virtio_for_bridges=False,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_model_kvm(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_parallels(self):
self.flags(use_virtio_for_bridges=True,
virt_type='parallels',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_kvm_qemu_parallels_custom(self):
for virt in ('kvm', 'qemu', 'parallels'):
self.flags(use_virtio_for_bridges=True,
virt_type=virt,
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
if virt == 'parallels':
supported = (network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000)
elif virt == 'qemu':
supported = (network_model.VIF_MODEL_LAN9118,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_E1000E,
network_model.VIF_MODEL_SPAPR_VLAN,
network_model.VIF_MODEL_VMXNET3)
else:
supported = (network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_E1000E,
network_model.VIF_MODEL_SPAPR_VLAN,
network_model.VIF_MODEL_VMXNET3)
for model in supported:
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': model}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta)
self._assertModel(xml, model)
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config')
def test_model_with_osinfo(self, mock_set):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.osinfo.libosinfo',
fakelibosinfo))
d = vif.LibvirtGenericVIFDriver()
image_meta = {'properties': {'os_name': 'fedora22'}}
image_meta = objects.ImageMeta.from_dict(image_meta)
flavor = objects.Flavor(
id=2,
name='m1.small',
memory_mb=128,
vcpus=4,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
disabled=False,
extra_specs={})
d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
flavor, 'kvm', 'normal')
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
'virtio', None, None, None)
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config',
wraps=vif.designer.set_vif_guest_frontend_config)
    def _test_model_sriov(self, vnic_type, mock_set):
"""Direct attach vNICs shouldn't retrieve info from image_meta."""
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.osinfo.libosinfo',
fakelibosinfo))
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio'}})
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
                                 None, 'kvm', vnic_type)
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
None, None, None, None)
self.assertIsNone(conf.vhost_queues)
self.assertIsNone(conf.driver_name)
self.assertIsNone(conf.model)
def test_model_sriov_direct(self):
self._test_model_sriov(network_model.VNIC_TYPE_DIRECT)
def test_model_accelerator_direct(self):
self._test_model_sriov(network_model.VNIC_TYPE_ACCELERATOR_DIRECT)
def test_model_qemu(self):
vif_objs = [
self.vif_bridge,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_ovs,
]
self.flags(use_virtio_for_bridges=True,
virt_type='qemu',
group='libvirt')
for vif_obj in vif_objs:
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, vif_obj)
doc = etree.fromstring(xml)
bandwidth = doc.find('./devices/interface/bandwidth')
self.assertIsNotNone(bandwidth)
inbound = bandwidth.find('inbound')
self.assertEqual(inbound.get("average"),
self.bandwidth['quota:vif_inbound_average'])
self.assertEqual(inbound.get("peak"),
self.bandwidth['quota:vif_inbound_peak'])
self.assertEqual(inbound.get("burst"),
self.bandwidth['quota:vif_inbound_burst'])
outbound = bandwidth.find('outbound')
self.assertEqual(outbound.get("average"),
self.bandwidth['quota:vif_outbound_average'])
self.assertEqual(outbound.get("peak"),
self.bandwidth['quota:vif_outbound_peak'])
self.assertEqual(outbound.get("burst"),
self.bandwidth['quota:vif_outbound_burst'])
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
def test_generic_driver_none(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.vif_none)
def _check_bridge_driver(self, d, vif, br_want):
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_bridge, br_want)
def test_generic_driver_bridge(self):
d = vif.LibvirtGenericVIFDriver()
self._check_bridge_driver(d,
self.vif_bridge,
self.vif_bridge['network']['bridge'])
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
@mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
@mock.patch('nova.privsep.linux_net.set_device_macaddr')
@mock.patch('nova.privsep.linux_net.set_device_macaddr_and_vlan')
def _test_hw_veb_op(self, op, vlan, mock_set_macaddr_and_vlan,
mock_set_macaddr, mock_get_vf_num,
mock_get_ifname):
mock_get_ifname.side_effect = ['eth1', 'eth13']
vlan_id = int(vlan)
port_state = 'up' if vlan_id > 0 else 'down'
mac = ('00:00:00:00:00:00' if op.__name__ == 'unplug'
else self.vif_hw_veb_macvtap['address'])
calls = {
'get_ifname':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'],
pf_interface=True),
mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'get_vf_num':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'set_macaddr': [mock.call('eth13', mac, port_state=port_state)]
}
op(self.instance, self.vif_hw_veb_macvtap)
mock_get_ifname.assert_has_calls(calls['get_ifname'])
mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
mock_set_macaddr.assert_has_calls(calls['set_macaddr'])
mock_set_macaddr_and_vlan.assert_called_once_with(
'eth1', 1, mock.ANY, vlan_id)
def test_plug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
details = self.vif_hw_veb_macvtap['details']
vlan = details[network_model.VIF_DETAILS_VLAN]
self._test_hw_veb_op(d.plug, vlan)
def test_unplug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(d.unplug, 0)
@mock.patch('nova.virt.libvirt.vif.set_vf_trusted')
def test_plug_hw_veb_trusted(self, mset_vf_trusted):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_hw_veb_trusted)
mset_vf_trusted.assert_called_once_with('0000:0a:00.1', True)
@mock.patch('nova.virt.libvirt.vif.set_vf_trusted')
def test_unplug_hw_veb_trusted(self, mset_vf_trusted):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_hw_veb_trusted)
mset_vf_trusted.assert_called_once_with('0000:0a:00.1', False)
@mock.patch('nova.privsep.libvirt.unplug_plumgrid_vif',
side_effect=processutils.ProcessExecutionError)
def test_unplug_iovisor(self, mock_unplug):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_iovisor)
@mock.patch('nova.privsep.linux_net.device_exists')
@mock.patch('nova.privsep.libvirt.plug_plumgrid_vif')
@mock.patch('nova.privsep.linux_net.create_tap_dev')
def test_plug_iovisor(self, mock_create_tap_dev, mock_plug, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_iovisor)
mock_plug.assert_has_calls(
[mock.call('tap-xxx-yyy-zzz',
self.vif_iovisor['id'],
self.vif_iovisor['address'],
self.vif_iovisor['network']['id'],
self.instance.project_id)])
def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ovs_filter_cap, br_want)
def test_ib_hostdev_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ib_hostdev)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_ib_hostdev)
def test_midonet_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_midonet)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_midonet, br_want)
def test_tap_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_tap['devname']
xml = self._get_instance_xml(d, self.vif_tap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_tap, br_want)
@mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
@mock.patch('nova.privsep.linux_net.set_device_mtu')
@mock.patch('nova.privsep.linux_net.create_tap_dev')
def test_plug_tap_kvm_virtio(
self, mock_create_tap_dev, mock_set_mtu, mock_device_exists):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
image_ref=uuids.image_ref, flavor=self.flavor_2vcpu,
project_id=723, system_metadata={}
)
d1.plug(ins, self.vif_tap)
mock_create_tap_dev.assert_called_once_with(
'tap-xxx-yyy-zzz', None, multiqueue=False)
mock_create_tap_dev.reset_mock()
d2 = vif.LibvirtGenericVIFDriver()
mq_ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
image_ref=uuids.image_ref, flavor=self.flavor_2vcpu,
system_metadata={
'image_hw_vif_multiqueue_enabled': 'True'
},
)
d2.plug(mq_ins, self.vif_tap)
mock_create_tap_dev.assert_called_once_with(
'tap-xxx-yyy-zzz', None, multiqueue=True)
@mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
@mock.patch('nova.privsep.linux_net.set_device_mtu')
@mock.patch('nova.privsep.linux_net.create_tap_dev')
def test_plug_tap_mq_ignored_1vcpu(
self, mock_create_tap_dev, mock_set_mtu, mock_device_exists):
d1 = vif.LibvirtGenericVIFDriver()
mq_ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
image_ref=uuids.image_ref, flavor=self.flavor_1vcpu,
project_id=723, system_metadata={
'image_hw_vif_multiqueue_enabled': 'True',
}
)
d1.plug(mq_ins, self.vif_tap)
mock_create_tap_dev.assert_called_once_with(
'tap-xxx-yyy-zzz', None, multiqueue=False)
@mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
@mock.patch('nova.privsep.linux_net.set_device_mtu')
@mock.patch('nova.privsep.linux_net.create_tap_dev')
def test_plug_tap_mq_ignored_virt_type(
self, mock_create_tap_dev, mock_set_mtu, mock_device_exists):
self.flags(use_virtio_for_bridges=True,
virt_type='lxc',
group='libvirt')
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
image_ref=uuids.image_ref, flavor=self.flavor_2vcpu,
project_id=723,
system_metadata={
'image_hw_vif_multiqueue_enabled': 'True'
},
)
d1.plug(ins, self.vif_tap)
mock_create_tap_dev.assert_called_once_with(
'tap-xxx-yyy-zzz', None, multiqueue=False)
@mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
@mock.patch('nova.privsep.linux_net.set_device_mtu')
@mock.patch('nova.privsep.linux_net.create_tap_dev')
def test_plug_tap_mq_ignored_vif_model(
self, mock_create_tap_dev, mock_set_mtu, mock_device_exists):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
image_ref=uuids.image_ref, flavor=self.flavor_2vcpu,
project_id=723,
system_metadata={
'image_hw_vif_multiqueue_enabled': 'True',
'image_hw_vif_model': 'e1000',
},
)
d1.plug(ins, self.vif_tap)
mock_create_tap_dev.assert_called_once_with(
'tap-xxx-yyy-zzz', None, multiqueue=False)
def test_unplug_tap(self):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_tap)
def test_generic_8021qbh_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbh)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
self._assertMacEquals(node, self.vif_8021qbh)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
details = self.vif_8021qbh["details"]
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
details[network_model.VIF_DETAILS_PROFILEID])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_hw_veb_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
self._assertMacEquals(node, self.vif_hw_veb)
conf = vconfig.LibvirtConfigGuestInterface()
conf.parse_dom(node)
self.assertEqual(conf.vlan, self.vif_hw_veb["details"]["vlan"])
def test_hostdev_physical_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hostdev_physical)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
def test_hw_veb_driver_macvtap(self, mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_hw_veb_macvtap)
vlan = node.find("vlan").find("tag").get("id")
vlan_want = self.vif_hw_veb["details"]["vlan"]
self.assertEqual(int(vlan), vlan_want)
def test_driver_macvtap_vlan(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_vlan)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0.1")
self._assertTypeEquals(node, "direct", "source",
"mode", "vepa")
self._assertMacEquals(node, self.vif_macvtap_vlan)
def test_driver_macvtap_flat(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_flat)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0")
self._assertTypeEquals(node, "direct", "source",
"mode", "bridge")
self._assertMacEquals(node, self.vif_macvtap_flat)
def test_driver_macvtap_exception(self):
d = vif.LibvirtGenericVIFDriver()
e = self.assertRaises(exception.VifDetailsMissingMacvtapParameters,
self._get_instance_xml,
d,
self.vif_macvtap_exception)
self.assertIn('macvtap_source', str(e))
self.assertIn('macvtap_mode', str(e))
self.assertIn('physical_interface', str(e))
def test_hw_veb_driver_arq(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_arq)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb_arq)
self._assertMacEquals(node, self.vif_hw_veb_arq)
conf = vconfig.LibvirtConfigGuestInterface()
conf.parse_dom(node)
self.assertEqual(conf.vlan, self.vif_hw_veb_arq["details"]["vlan"])
@mock.patch('nova.virt.libvirt.vif.ensure_vlan')
def test_macvtap_plug_vlan(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_vlan)
ensure_vlan_mock.assert_called_once_with(1, 'eth0', interface='eth0.1')
@mock.patch('nova.virt.libvirt.vif.ensure_vlan')
def test_macvtap_plug_flat(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_flat)
self.assertFalse(ensure_vlan_mock.called)
def test_generic_iovisor_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_iovisor['devname']
xml = self._get_instance_xml(d, self.vif_iovisor)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_iovisor, br_want)
def test_generic_8021qbg_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbg)
node = self._get_node(xml)
self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
self._assertMacEquals(node, self.vif_8021qbg)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbg")
manager_id_found = False
type_id_found = False
typeversion_id_found = False
instance_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.vif_8021qbg['qbg_params']
manager_id = p_elem.get("managerid", None)
type_id = p_elem.get("typeid", None)
typeversion_id = p_elem.get("typeidversion", None)
instance_id = p_elem.get("instanceid", None)
if manager_id:
self.assertEqual(manager_id,
wantparams['managerid'])
manager_id_found = True
if type_id:
self.assertEqual(type_id,
wantparams['typeid'])
type_id_found = True
if typeversion_id:
self.assertEqual(typeversion_id,
wantparams['typeidversion'])
typeversion_id_found = True
if instance_id:
self.assertEqual(instance_id,
wantparams['instanceid'])
instance_id_found = True
self.assertTrue(manager_id_found)
self.assertTrue(type_id_found)
self.assertTrue(typeversion_id_found)
self.assertTrue(instance_id_found)
def test_vhostuser_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_vhostuser)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/vif-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_vhostuser_driver_queue_sizes(self):
self.flags(rx_queue_size=512, group='libvirt')
self.flags(tx_queue_size=1024, group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_vhostuser)
node = self._get_node(xml)
self._assertQueueSizeEquals(node, "512", "1024")
def test_vhostuser_driver_no_path(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.VifDetailsMissingVhostuserSockPath,
self._get_instance_xml,
d,
self.vif_vhostuser_no_path)
def test_vhostuser_driver_ovs(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_vhostuser_ovs)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser_ovs)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_ovs_direct(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ovs_direct)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node,
"hostdev",
self.vif_ovs_direct)
self._assertMacEquals(node, self.vif_ovs_direct)
def test_agilio_ovs_direct(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_agilio_ovs_direct)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node,
"hostdev",
self.vif_agilio_ovs_direct)
self._assertMacEquals(node, self.vif_agilio_ovs_direct)
def test_agilio_ovs_forwarder(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_agilio_ovs_forwarder)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_agilio_ovs_forwarder)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_ivs_ethernet_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
dev_want = self.vif_ivs['devname']
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, dev_want)
script = node.find("script")
self.assertIsNone(script)
def test_vrouter(self):
"""Test for the Contrail / Tungsten Fabric kernel datapath."""
d = vif.LibvirtGenericVIFDriver()
dev_want = self.vif_vrouter['devname']
xml = self._get_instance_xml(d, self.vif_vrouter)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_vrouter, dev_want)
def test_vrouter_direct(self):
"""Test for Contrail / Tungsten Fabric direct offloaded datapath."""
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_vrouter_direct)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node,
"hostdev",
self.vif_vrouter_direct)
self._assertMacEquals(node, self.vif_vrouter_direct)
def test_vrouter_forwarder(self):
"""Test for Contrail / Tungsten Fabric indirect offloaded datapath."""
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_vrouter_forwarder)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "server")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vrouter_forwarder)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_contrail_vrouter(self):
"""Test for the Contrail / Tungsten Fabric DPDK datapath."""
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_contrail_vrouter)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "server")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_contrail_vrouter)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
@mock.patch.object(os_vif, "plug")
def _test_osvif_plug(self, fail, mock_plug,
mock_convert_vif, mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
if fail:
mock_plug.side_effect = osv_exception.ExceptionBase("Wibble")
self.assertRaises(exception.NovaException,
d.plug,
self.instance, self.vif_bridge)
else:
d.plug(self.instance, self.vif_bridge)
mock_plug.assert_called_once_with(self.os_vif_bridge,
self.os_vif_inst_info)
def test_osvif_plug_normal(self):
self._test_osvif_plug(False)
def test_osvif_plug_fail(self):
self._test_osvif_plug(True)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
@mock.patch.object(os_vif, "unplug")
def _test_osvif_unplug(self, fail, mock_unplug,
mock_convert_vif, mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
if fail:
mock_unplug.side_effect = osv_exception.ExceptionBase("Wibble")
self.assertRaises(exception.NovaException,
d.unplug,
self.instance, self.vif_bridge)
else:
d.unplug(self.instance, self.vif_bridge)
mock_unplug.assert_called_once_with(self.os_vif_bridge,
self.os_vif_inst_info)
def test_osvif_unplug_normal(self):
self._test_osvif_unplug(False)
def test_osvif_unplug_fail(self):
self._test_osvif_unplug(True)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def _test_config_os_vif(self, os_vif_model, vif_model,
expected_xml, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = os_vif_model
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, vif_model)
node = self._get_node(xml)
node_xml = etree.tostring(node).decode()
self._assertXmlEqual(expected_xml, node_xml)
def test_config_os_vif_bridge(self):
os_vif_type = self.os_vif_bridge
vif_type = self.vif_bridge
expected_xml = """
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<bandwidth>
<inbound average="100" peak="200" burst="300"/>
<outbound average="10" peak="20" burst="30"/>
</bandwidth>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_bridge_nofw(self):
os_vif_type = self.os_vif_bridge
vif_type = self.vif_bridge
expected_xml = """
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<bandwidth>
<inbound average="100" peak="200" burst="300"/>
<outbound average="10" peak="20" burst="30"/>
</bandwidth>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_vhostuser(self):
os_vif_type = self.os_vif_vhostuser
vif_type = self.vif_vhostuser
expected_xml = """
<interface type="vhostuser">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source mode="client"
path="/var/run/openvswitch/vhudc065497-3c" type="unix"/>
<target dev="vhudc065497-3c"/>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_agilio_ovs_fallthrough(self):
os_vif_type = self.os_vif_agilio_ovs
vif_type = self.vif_agilio_ovs
expected_xml = """
<interface type="ethernet">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<bandwidth>
<inbound average="100" peak="200" burst="300"/>
<outbound average="10" peak="20" burst="30"/>
</bandwidth>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_agilio_ovs_forwarder(self):
os_vif_type = self.os_vif_agilio_forwarder
vif_type = self.vif_agilio_ovs_forwarder
expected_xml = """
<interface type="vhostuser">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source mode="client"
path="/var/run/openvswitch/vhudc065497-3c" type="unix"/>
<target dev="nicdc065497-3c"/>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_agilio_ovs_direct(self):
os_vif_type = self.os_vif_agilio_direct
vif_type = self.vif_agilio_ovs_direct
expected_xml = """
<interface type="hostdev" managed="yes">
<mac address="22:52:25:62:e2:aa"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_ovs(self):
os_vif_type = self.os_vif_ovs
vif_type = self.vif_ovs
expected_xml = """
<interface type="ethernet">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<bandwidth>
<inbound average="100" peak="200" burst="300"/>
<outbound average="10" peak="20" burst="30"/>
</bandwidth>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_ovs_hybrid(self):
os_vif_type = self.os_vif_ovs_hybrid
vif_type = self.vif_ovs
expected_xml = """
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<bandwidth>
<inbound average="100" peak="200" burst="300"/>
<outbound average="10" peak="20" burst="30"/>
</bandwidth>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
def test_config_os_vif_hostdevice_ethernet(self):
os_vif_type = self.os_vif_hostdevice_ethernet
vif_type = self.vif_bridge
expected_xml = """
<interface type="hostdev" managed="yes">
<mac address="22:52:25:62:e2:aa"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
</interface>"""
self._test_config_os_vif(os_vif_type, vif_type, expected_xml)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_hostdevice_generic(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_hostdevice_generic
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.InternalError,
self._get_instance_xml, d, self.vif_bridge)
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova common internal object model"""
import contextlib
import datetime
import functools
import traceback
import netaddr
import oslo_messaging as messaging
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import exception as ovoo_exc
import six
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import utils
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
# FIXME(danms): This is just until we use o.vo's class properties
# and object base.
return '_obj_' + name
class NovaObjectRegistry(ovoo_base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
# NOTE(danms): This is called when an object is registered,
# and is responsible for maintaining nova.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
remotable_classmethod = ovoo_base.remotable_classmethod
remotable = ovoo_base.remotable
class NovaObject(ovoo_base.VersionedObject):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
OBJ_SERIAL_NAMESPACE = 'nova_object'
OBJ_PROJECT_NAMESPACE = 'nova'
# NOTE(ndipanov): This is nova-specific
@staticmethod
def should_migrate_data():
"""A check that can be used to inhibit online migration behavior
This is usually used to check if all services that will be accessing
the db directly are ready for the new format.
"""
raise NotImplementedError()
# NOTE(danms): This has some minor change between the nova and o.vo
# version, so avoid inheriting it for the moment so we can make that
# transition separately for clarity.
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
.. note::
- This is NOT "revert to previous values"
- Specifying fields on recursive resets will only be honored at the
top level. Everything below the top will reset all.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
# NOTE(danms): This is nova-specific
@contextlib.contextmanager
def obj_alternate_context(self, context):
original_context = self._context
self._context = context
try:
yield
finally:
self._context = original_context
# NOTE(danms): This is nova-specific
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
class NovaObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
def __iter__(self):
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name
def keys(self):
return list(self)
class NovaTimestampObject(object):
"""Mixin class for db backed objects with timestamp fields.
Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
'deleted_at': obj_fields.DateTimeField(nullable=True),
'deleted': obj_fields.BooleanField(default=False),
}
class ObjectListBase(ovoo_base.ObjectListBase):
# NOTE(danms): These are for transition to using the oslo
# base object and can be removed when we move to it.
@classmethod
def _obj_primitive_key(cls, field):
return 'nova_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except ovoo_exc.IncompatibleObjectVersion:
objver = objprim['nova_object.version']
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim['nova_object.version'] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
objname = objprim['nova_object.name']
version_manifest = ovoo_base.obj_tree_get_versions(objname)
if objname in version_manifest:
objinst = self.conductor.object_backport_versions(
context, objprim, version_manifest)
else:
raise
return objinst
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_make_dict_of_lists(context, list_cls, obj_list, item_key):
"""Construct a dictionary of object lists, keyed by item_key.
:param:context: Request context
:param:list_cls: The ObjectListBase class
:param:obj_list: The list of objects to place in the dictionary
:param:item_key: The object attribute name to use as a dictionary key
"""
obj_lists = {}
for obj in obj_list:
key = getattr(obj, item_key)
if key not in obj_lists:
obj_lists[key] = list_cls()
obj_lists[key].objects = []
obj_lists[key].objects.append(obj)
for key in obj_lists:
obj_lists[key]._context = context
obj_lists[key].obj_reset_changes()
return obj_lists
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
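# Illustrative call pattern for obj_make_list (the InstanceList/Instance pair
# and the db_instances variable are only examples of a typical caller):
#
#     return base.obj_make_list(context, objects.InstanceList(context),
#                               objects.Instance, db_instances)
#
# Each DB row is hydrated with item_cls._from_db_object() and appended to the
# list object's 'objects' attribute before its changes are reset.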
def serialize_args(fn):
"""Decorator that will do the arguments serialization before remoting."""
def wrapper(obj, *args, **kwargs):
args = [utils.strtime(arg) if isinstance(arg, datetime.datetime)
else arg for arg in args]
for k, v in six.iteritems(kwargs):
if k == 'exc_val' and v:
kwargs[k] = six.text_type(v)
elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
kwargs[k] = ''.join(traceback.format_tb(v))
elif isinstance(v, datetime.datetime):
kwargs[k] = utils.strtime(v)
if hasattr(fn, '__call__'):
return fn(obj, *args, **kwargs)
# NOTE(danms): We wrap a descriptor, so use that protocol
return fn.__get__(None, obj)(*args, **kwargs)
# NOTE(danms): Make this discoverable
wrapper.remotable = getattr(fn, 'remotable', False)
wrapper.original_fn = fn
return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
else classmethod(wrapper))
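# Illustration of what serialize_args converts before remoting: datetime
# positional arguments become strings via utils.strtime(), an 'exc_val'
# keyword is coerced to text, and an 'exc_tb' traceback object is flattened
# with traceback.format_tb() so it can cross the RPC boundary.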
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
    :param:obj_1: The first object in the comparison
    :param:obj_2: The second object in the comparison
:param:ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
def _strip(prim, keys):
if isinstance(prim, dict):
for k in keys:
prim.pop(k, None)
for v in prim.values():
_strip(v, keys)
if isinstance(prim, list):
for v in prim:
_strip(v, keys)
return prim
if ignore is not None:
keys = ['nova_object.changes'] + ignore
else:
keys = ['nova_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
|
|
import numpy as np
from scipy.special import erf
#--> The lnlikes
#-> The lnlike calculated with generalized chi square
#------------------------------------------------------------------------------#
# Note:
# When I compare the two different methods of handling the upper limits, the
# results are surprisingly consistent with each other. Both of them obtain
# reasonable posterior probability distributions and I cannot tell which one
# is better than the other.
#------------------------------------------------------------------------------#
#The generalized chi-square function with Sawicki (2012)'s method.
def ChiSq_0(data, model, unct=None, flag=None):
'''
    This is a generalized chi-square function that allows y to include upper limits.
    The upper limits are properly dealt with using the method of Sawicki (2012).
Parameters
----------
data : float array
The observed data and upperlimits.
model : float array
The model.
    unct : float array or None by default
The uncertainties.
flag : float array or None by default
The flag of upperlimits, 0 for detection and 1 for upperlimits.
Returns
-------
chsq : float
The Chi square
Notes
-----
    This chi-square form considers x and y asymmetrically except in some special
    situations.
'''
if unct is None:
unct = np.ones_like(data)
if flag is None:
flag = np.zeros_like(data)
fltr_dtc = flag == 0
fltr_non = flag == 1
if np.sum(fltr_dtc)>0:
wrsd_dtc = (data[fltr_dtc] - model[fltr_dtc])/unct[fltr_dtc] #The weighted residual
chsq_dtc = np.sum(wrsd_dtc**2) + np.sum( np.log(2 * np.pi * unct[fltr_dtc]**2.0) )
else:
chsq_dtc = 0.
if np.sum(fltr_non)>0:
unct_non = unct[fltr_non]
wrsd_non = (data[fltr_non] - model[fltr_non])/(unct_non * 2**0.5)
chsq_non = np.sum( -2.* np.log( 0.5 * (1 + erf(wrsd_non)) ) )
else:
chsq_non = 0.
chsq = chsq_dtc + chsq_non
return chsq
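# Minimal usage sketch for ChiSq_0 (the array values are only illustrative):
# the last two points are upper limits (flag=1), so they enter through the
# erf-based Sawicki (2012) term instead of the Gaussian residual.
def _example_ChiSq_0():
    data = np.array([1.0, 2.1, 2.9, 1.5, 0.8])
    model = np.array([1.1, 2.0, 3.0, 1.0, 1.0])
    unct = np.array([0.1, 0.2, 0.2, 0.5, 0.3])
    flag = np.array([0, 0, 0, 1, 1])
    return ChiSq_0(data, model, unct=unct, flag=flag)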
#The generalized chi-square function with simple Gaussian method.
def ChiSq_1(data, model, unct=None, flag=None):
'''
    This is a generalized chi-square function that allows y to include upper limits.
    A point contributes zero to the chi square when the model is below the upper
    limit, while it contributes like a normally detected point when the model is
    above the upper limit.
Parameters
----------
data : float array
The observed data and upperlimits.
model : float array
The model.
    unct : float array or None by default
The uncertainties.
flag : float array or None by default
The flag of upperlimits, 0 for detection and 1 for upperlimits.
Returns
-------
chsq : float
The Chi square
Notes
-----
    This chi-square form considers x and y asymmetrically except in some special
    situations.
'''
if unct is None:
unct = np.ones_like(data)
if flag is None:
flag = np.zeros_like(data)
fltr_dtc = flag == 0
fltr_non = flag == 1
if np.sum(fltr_dtc)>0:
wrsd_dtc = (data[fltr_dtc] - model[fltr_dtc])/unct[fltr_dtc] #The weighted residual
chsq_dtc = np.sum(wrsd_dtc**2) + np.sum( np.log(2 * np.pi * unct[fltr_dtc]**2) )
else:
chsq_dtc = 0.
if np.sum(fltr_non)>0:
data_non = data[fltr_non]
model_non = model[fltr_non]
unct_non = unct[fltr_non]
wrsd_non = np.zeros_like(data_non)
        #Only when the model is above the upper limit does it contribute to the chi square.
fltr = model_non > data_non
wrsd_non[fltr] = (model_non[fltr] - data_non[fltr]) / unct_non[fltr]
chsq_non = np.sum(wrsd_non**2) + np.sum( np.log(2 * np.pi * unct_non[fltr]**2) )
else:
chsq_non = 0.
chsq = chsq_dtc + chsq_non
return chsq
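# In contrast to ChiSq_0, ChiSq_1 treats an upper limit as a one-sided
# constraint: a point costs nothing while the model stays below the limit and
# is penalized like an ordinary detection once the model rises above it. As
# noted at the top of this module, the two treatments give consistent
# posteriors in practice.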
def lnlike_gcs(theta, x, y, xerr, yerr, fix_m=None, fix_b=None, *args, **kwargs):
"""
    The ln likelihood function using the generalized chi-square function. The y
    data may include upper limits.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
x : float array
The data of x.
y : float array
The data of y.
xerr : float array
The uncertainty of the x data.
yerr : float array
The uncertainty of the y data.
    fix_m : (Optional) float
        Fix the value of m to the given value.
    fix_b : (Optional) float
        Fix the value of b to the given value.
args and kwargs : for the ChiSq function.
Returns
-------
The ln likelihood.
Notes
-----
    We use lnf here mainly as the epsy0 of the Nukers method rather than as a
    fraction of the model uncertainty. With this treatment, the best-fit result is
    close to the Nukers result and we can handle the upper limits with this
    function.
"""
lenPar = len(theta)
parDict = {}
nFix = 0
    if fix_m is not None:
parDict["m"] = fix_m
nFix += 1
    if fix_b is not None:
parDict["b"] = fix_b
nFix += 1
fixList = parDict.keys()
nUse = 0
if (lenPar + nFix) == 2:
if "m" in fixList:
m = parDict["m"]
else:
m = theta[nUse]
nUse += 1
if "b" in fixList:
b = parDict["b"]
else:
b = theta[nUse]
model = m * x + b
s = np.sqrt(yerr**2 + (m*xerr)**2)
elif (lenPar + nFix) == 3:
if "m" in fixList:
m = parDict["m"]
else:
m = theta[nUse]
nUse += 1
if "b" in fixList:
b = parDict["b"]
else:
b = theta[nUse]
nUse += 1
model = m * x + b
lnf = theta[nUse]
#s = np.sqrt(yerr**2 + (m*xerr)**2 + model**2*np.exp(2*lnf))
s = np.sqrt(yerr**2 + (m*xerr)**2 + np.exp(2*lnf))
lnL = -0.5 * ChiSq_0(y, model, s, *args, **kwargs)
return lnL
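# Usage sketch for lnlike_gcs (illustrative data): fix the slope with fix_m so
# that theta only carries the intercept, and forward the upper-limit flags to
# the ChiSq function through the extra keyword arguments.
def _example_lnlike_gcs():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([4.3, 3.4, 2.4, 1.6])
    xerr = np.full_like(x, 0.1)
    yerr = np.full_like(y, 0.2)
    flag = np.array([0, 0, 0, 1])
    return lnlike_gcs([4.3], x, y, xerr, yerr, fix_m=-0.9, flag=flag)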
#-> The Nukers' lnlike
def lnlike_Nukers(theta, x, y, epsx, epsy):
"""
This is the ln likelihood function resembling the Nukers' estimate from
Tremaine et al. (2002). One of the merits of this form is that the x and y
are symmetric (see the paper for the details). The symbols of the parameters
also follow the paper.
Parameters
----------
theta : list
The list of model parameters, [beta, alpha, epsy0 (optional)].
beta is the slope, alpha is the intercept and epsy0 is the intrinsic
scatter along the y direction.
x : float array
The x data.
y : float array
The y data.
epsx : float array
The uncertainty of x data.
epsy : float array
The uncertainty of y data.
Returns
-------
lnL : float
The ln of the likelihood.
Notes
-----
    The lnlike penalizes a very broad intrinsic dispersion by assuming it is
    Gaussian. Therefore, the optimization seeks the maximum of the lnlike instead
    of driving the Nukers' estimate to ~1.
"""
if len(theta) == 2:
beta, alpha = theta
inv_sigma2 = 1.0/(epsy**2 + (beta * epsx)**2)
if len(theta) == 3:
beta, alpha, epsy0 = theta
inv_sigma2 = 1.0/(epsy**2 + epsy0**2 + (beta * epsx)**2)
lnL = -0.5*(np.sum((y - alpha - beta * x)**2*inv_sigma2 - np.log(inv_sigma2)))
return lnL
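# lnlike_Nukers above is built from the Nukers-style merit function of
# Tremaine et al. (2002),
#     chi^2 = sum_i (y_i - alpha - beta*x_i)^2
#                   / (epsy_i^2 + epsy0^2 + beta^2*epsx_i^2),
# plus a sum of ln(sigma_i^2) terms that keeps an arbitrarily large intrinsic
# scatter epsy0 from trivially maximizing the likelihood.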
#-> The lnlike calculated from the distance perpendicular to the line following
#Hogg et al. (2010; arXiv:1008.4686)
def lnlike_perp(theta, x, y, sigx, sigy):
"""
This is the ln likelihood function considering the 2-dimensional uncertainties
and calculated based on the distance of the points perpendicular to the line.
It follows the equation (35) of Hogg et al. (2010; arXiv:1008.4686).
Parameters
----------
theta : list
        The list of model parameters, [t, bv, V (optional)]. t (in radians) is
        the angle (theta = arctan slope), bv is the perpendicular distance of
        the line from the origin and V is the intrinsic Gaussian variance
        orthogonal to the line.
x : float array
The x data.
y : float array
The y data.
sigx : float array
The uncertainty of x data.
sigy : float array
The uncertainty of y data.
Returns
-------
lnL : float
The ln likelihood.
Notes
-----
None.
"""
if len(theta) == 2:
t, bv = theta
V = 0
if len(theta) == 3:
t, bv, V = theta
delta = y * np.cos(t) - x * np.sin(t) - bv
Sigma2 = (sigx * np.sin(t))**2 + (sigy * np.cos(t))**2
lnL = -0.5 * np.sum(delta**2 / (Sigma2 + V) + np.log(Sigma2 + V))
return lnL
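# Illustrative helper (the name is arbitrary): the familiar slope/intercept
# parameters map onto the (t, bv) parametrization used by lnlike_perp via
# t = arctan(m) and bv = b*cos(t), the signed perpendicular offset of the
# line y = m*x + b from the origin.
def _slope_intercept_to_perp(m, b):
    t = np.arctan(m)
    bv = b * np.cos(t)
    return t, bv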
def lnlike_perp2(theta, x, y, sigx, sigy):
"""
This is the ln likelihood function considering the 2-dimensional uncertainties
and calculated based on the distance of the points perpendicular to the line.
It follows the equation (35) of Hogg et al. (2010; arXiv:1008.4686).
Parameters
----------
theta : list
        The list of model parameters, [t, b, V (optional)]. t (in radians) is
        the angle (theta = arctan slope), b is the intercept and V is the
        intrinsic Gaussian variance orthogonal to the line.
x : float array
The x data.
y : float array
The y data.
sigx : float array
The uncertainty of x data.
sigy : float array
The uncertainty of y data.
Returns
-------
lnL : float
The ln likelihood.
Notes
-----
None.
"""
if len(theta) == 2:
t, b = theta
V = 0
if len(theta) == 3:
t, b, V = theta
delta = (y - b) * np.cos(t) - x * np.sin(t)
Sigma2 = (sigx * np.sin(t))**2 + (sigy * np.cos(t))**2
lnL = -0.5 * np.sum(delta**2 / (Sigma2 + V) + np.log(Sigma2 + V))
return lnL
#-> The lnlike that considers the model imperfectness naively as a fraction of
#the model values.
def lnlike_naive(theta, x, y, xerr, yerr):
"""
The ln of likelihood function using all detected data.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
x : float array
The data of x.
y : float array
The data of y.
xerr : float array
The uncertainty of the x data.
yerr : float array
The uncertainty of the y data.
Returns
-------
The ln likelihood.
Notes
-----
None.
"""
if len(theta) == 2:
m, b = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + (m*xerr)**2)
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
    elif len(theta) == 3:
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + (m*xerr)**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
else:
raise ValueError("[linfit]: The length of parameters ({0}) is incorrect!".format(len(theta)))
def lnprior(theta, pRanges):
"""
The ln of prior function.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
pRanges : list
The list of the parameter prior ranges.
Returns
-------
The ln prior.
Notes
-----
None.
"""
assert len(theta) == len(pRanges)
if len(theta) == 2:
m, b = theta
mR, bR = pRanges
if mR[0] < m < mR[1] and bR[0] < b < bR[1]:
return 0.0
return -np.inf
    elif len(theta) == 3:
m, b, lnf = theta
mR, bR, lnfR = pRanges
if mR[0] < m < mR[1] and bR[0] < b < bR[1] and lnfR[0] < lnf < lnfR[1]:
return 0.0
return -np.inf
else:
raise ValueError("[linfit]: The length of parameters ({0}) is incorrect!".format(len(theta)))
def lnprob(theta, x, y, xerr, yerr, pRanges, *args, **kwargs):
"""
The ln of probability function.
Parameters
----------
theta : list
The list of the model parameters, [m, b, lnf (optional)].
x : float array
The data of x.
y : float array
The data of y.
xerr : float array
The uncertainty of the x data.
yerr : float array
The uncertainty of the y data.
pRanges : list
The list of the parameter prior ranges.
args and kwargs : for the ChiSq function.
Returns
-------
The ln probability.
Notes
-----
None.
"""
print "args", args
print "kwargs", kwargs
lp = lnprior(theta, pRanges)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, xerr, yerr, *args, **kwargs)
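# Sketch of driving lnprob with an MCMC sampler, assuming the emcee package is
# available; the prior ranges, walker count and step count are illustrative
# placeholders only.
def _example_run_mcmc(x, y, xerr, yerr):
    import emcee
    pRanges = [(-5.0, 5.0), (-10.0, 10.0)]  # assumed priors on m and b
    ndim, nwalkers = 2, 32
    p0 = np.random.uniform(-1.0, 1.0, size=(nwalkers, ndim))
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                                    args=(x, y, xerr, yerr, pRanges))
    sampler.run_mcmc(p0, 500)
    return sampler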
if __name__ == "__main__":
m_true = -0.9594
b_true = 4.294
data = np.loadtxt("examples/data_lnf.txt")
#data = np.loadtxt("examples/data_upp.txt")
x = data[:, 0]
y = data[:, 1]
xerr = data[:, 2]
yerr = data[:, 3]
flag = data[:, 4]
model = m_true * x + b_true
sigma = np.sqrt(yerr**2 + (m_true * xerr)**2)
    print(ChiSq_0(y, model, sigma, flag))
    print(ChiSq_1(y, model, sigma, flag))
|