"""Unit tests for the copy module."""
import sys
import copy
import copy_reg
import unittest
from test import test_support
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assert_(copy.Error is copy.error)
self.assert_(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.copy(x) is x, repr(x))
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0] is y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C:
__metaclass__ = Meta
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
# Type-specific _deepcopy_xxx() methods
def test_deepcopy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.deepcopy(x) is x, repr(x))
def test_deepcopy_list(self):
x = [[1, 2], 3]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_deepcopy_reflexive_list(self):
x = []
x.append(x)
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y[0] is y)
self.assertEqual(len(y), 1)
def test_deepcopy_tuple(self):
x = ([1, 2], 3)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_deepcopy_reflexive_tuple(self):
x = ([],)
x[0].append(x)
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0][0] is y)
def test_deepcopy_dict(self):
x = {"foo": [1, 2], "bar": 3}
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x["foo"] is not y["foo"])
def test_deepcopy_reflexive_dict(self):
x = {}
x['foo'] = x
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y['foo'] is y)
self.assertEqual(len(y), 1)
def test_deepcopy_keepalive(self):
memo = {}
x = 42
y = copy.deepcopy(x, memo)
self.assert_(memo[id(x)] is x)
def test_deepcopy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_deepcopy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo):
return C(copy.deepcopy(self.foo, memo))
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_reflexive_inst(self):
class C:
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assert_(y is not x)
self.assert_(y.foo is y)
# _reconstruct()
def test_reconstruct_string(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
y = copy.deepcopy(x)
self.assert_(y is x)
def test_reconstruct_nostate(self):
class C(object):
def __reduce__(self):
return (C, ())
x = C()
x.foo = 42
y = copy.copy(x)
self.assert_(y.__class__ is x.__class__)
y = copy.deepcopy(x)
self.assert_(y.__class__ is x.__class__)
def test_reconstruct_state(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_reconstruct_state_setstate(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_reconstruct_reflexive(self):
class C(object):
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assert_(y is not x)
self.assert_(y.foo is y)
# Additions for Python 2.3 and pickle protocol 2
def test_reduce_4tuple(self):
class C(list):
def __reduce__(self):
return (C, (), self.__dict__, iter(self))
def __cmp__(self, other):
return (cmp(list(self), list(other)) or
cmp(self.__dict__, other.__dict__))
x = C([[1, 2], 3])
y = copy.copy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x[0] is y[0])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_reduce_5tuple(self):
class C(dict):
def __reduce__(self):
return (C, (), self.__dict__, None, self.iteritems())
def __cmp__(self, other):
return (cmp(dict(self), dict(other)) or
cmp(self.__dict__, other.__dict__))
x = C([("foo", [1, 2]), ("bar", 3)])
y = copy.copy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is y["foo"])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is not y["foo"])
def test_copy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.copy(x)
self.assert_(x.foo is y.foo)
def test_deepcopy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.deepcopy(x)
self.assertEqual(x.foo, y.foo)
self.assert_(x.foo is not y.foo)
def test_copy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.copy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assert_(x[0] is y[0])
self.assert_(x.foo is y.foo)
def test_deepcopy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.deepcopy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assert_(x[0] is not y[0])
self.assert_(x.foo is not y.foo)
def test_copy_tuple_subclass(self):
class C(tuple):
pass
x = C([1, 2, 3])
self.assertEqual(tuple(x), (1, 2, 3))
y = copy.copy(x)
self.assertEqual(tuple(y), (1, 2, 3))
def test_deepcopy_tuple_subclass(self):
class C(tuple):
pass
x = C([[1, 2], 3])
self.assertEqual(tuple(x), ([1, 2], 3))
y = copy.deepcopy(x)
self.assertEqual(tuple(y), ([1, 2], 3))
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_getstate_exc(self):
class EvilState(object):
def __getstate__(self):
raise ValueError, "ain't got no stickin' state"
self.assertRaises(ValueError, copy.copy, EvilState())
def test_copy_function(self):
self.assertEqual(copy.copy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.copy(foo), foo)
bar = lambda: None
self.assertEqual(copy.copy(bar), bar)
def test_deepcopy_function(self):
self.assertEqual(copy.deepcopy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.deepcopy(foo), foo)
bar = lambda: None
self.assertEqual(copy.deepcopy(bar), bar)
def global_foo(x, y): return x+y
def test_main():
test_support.run_unittest(TestCopy)
if __name__ == "__main__":
test_main()
|
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import shutil
import tempfile
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ip_link_support
from neutron.agent.linux import keepalived
from neutron.agent.linux import utils as agent_utils
from neutron.common import constants as n_consts
from neutron.common import utils
from neutron.i18n import _LE
from neutron.plugins.common import constants as const
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as ovs_const
LOG = logging.getLogger(__name__)
MINIMUM_DNSMASQ_VERSION = 2.67
MINIMUM_DIBBLER_VERSION = '1.0.1'
def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'):
name = "vxlantest-" + utils.get_random_string(6)
with ovs_lib.OVSBridge(name) as br:
port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_VXLAN)
return port != ovs_lib.INVALID_OFPORT
def ovs_geneve_supported(from_ip='192.0.2.3', to_ip='192.0.2.4'):
name = "genevetest-" + utils.get_random_string(6)
with ovs_lib.OVSBridge(name) as br:
port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_GENEVE)
return port != ovs_lib.INVALID_OFPORT
def iproute2_vxlan_supported():
ip = ip_lib.IPWrapper()
name = "vxlantest-" + utils.get_random_string(4)
port = ip.add_vxlan(name, 3000)
ip.del_veth(name)
return name == port.name
def patch_supported():
seed = utils.get_random_string(6)
name = "patchtest-" + seed
peer_name = "peertest0-" + seed
patch_name = "peertest1-" + seed
with ovs_lib.OVSBridge(name) as br:
port = br.add_patch_port(patch_name, peer_name)
return port != ovs_lib.INVALID_OFPORT
def nova_notify_supported():
try:
import neutron.notifiers.nova # noqa since unused
return True
except ImportError:
return False
def ofctl_arg_supported(cmd, **kwargs):
"""Verify if ovs-ofctl binary supports cmd with **kwargs.
:param cmd: ovs-ofctl command to use for test.
:param **kwargs: arguments to test with the command.
:returns: a boolean if the supplied arguments are supported.
"""
br_name = 'br-test-%s' % utils.get_random_string(6)
with ovs_lib.OVSBridge(br_name) as test_br:
full_args = ["ovs-ofctl", cmd, test_br.br_name,
ovs_lib._build_flow_expr_str(kwargs, cmd.split('-')[0])]
try:
agent_utils.execute(full_args, run_as_root=True)
except RuntimeError as e:
LOG.debug("Exception while checking supported feature via "
"command %s. Exception: %s", full_args, e)
return False
except Exception:
LOG.exception(_LE("Unexpected exception while checking supported"
" feature via command: %s"), full_args)
return False
else:
return True
def arp_responder_supported():
mac = netaddr.EUI('dead:1234:beef', dialect=netaddr.mac_unix)
ip = netaddr.IPAddress('240.0.0.1')
actions = ovs_const.ARP_RESPONDER_ACTIONS % {'mac': mac, 'ip': ip}
return ofctl_arg_supported(cmd='add-flow',
table=21,
priority=1,
proto='arp',
dl_vlan=42,
nw_dst='%s' % ip,
actions=actions)
def arp_header_match_supported():
return ofctl_arg_supported(cmd='add-flow',
table=24,
priority=1,
proto='arp',
arp_op='0x2',
arp_spa='1.1.1.1',
actions="NORMAL")
def vf_management_supported():
is_supported = True
required_caps = (
ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK,
ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE)
try:
vf_section = ip_link_support.IpLinkSupport.get_vf_mgmt_section()
for cap in required_caps:
if not ip_link_support.IpLinkSupport.vf_mgmt_capability_supported(
vf_section, cap):
is_supported = False
LOG.debug("ip link command does not support "
"vf capability '%(cap)s'", cap)
except ip_link_support.UnsupportedIpLinkCommand:
LOG.exception(_LE("Unexpected exception while checking supported "
"ip link command"))
return False
return is_supported
def netns_read_requires_helper():
ipw = ip_lib.IPWrapper()
nsname = "netnsreadtest-" + uuidutils.generate_uuid()
ipw.netns.add(nsname)
try:
# read without root_helper. if exists, not required.
ipw_nohelp = ip_lib.IPWrapper()
exists = ipw_nohelp.netns.exists(nsname)
finally:
ipw.netns.delete(nsname)
return not exists
def get_minimal_dnsmasq_version_supported():
return MINIMUM_DNSMASQ_VERSION
def dnsmasq_version_supported():
try:
cmd = ['dnsmasq', '--version']
env = {'LC_ALL': 'C'}
out = agent_utils.execute(cmd, addl_env=env)
m = re.search(r"version (\d+\.\d+)", out)
ver = float(m.group(1)) if m else 0
if ver < MINIMUM_DNSMASQ_VERSION:
return False
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking minimal dnsmasq version. "
"Exception: %s", e)
return False
return True
class KeepalivedIPv6Test(object):
def __init__(self, ha_port, gw_port, gw_vip, default_gw):
self.ha_port = ha_port
self.gw_port = gw_port
self.gw_vip = gw_vip
self.default_gw = default_gw
self.manager = None
self.config = None
self.config_path = None
self.nsname = "keepalivedtest-" + uuidutils.generate_uuid()
self.pm = external_process.ProcessMonitor(cfg.CONF, 'router')
self.orig_interval = cfg.CONF.AGENT.check_child_processes_interval
def configure(self):
config = keepalived.KeepalivedConf()
instance1 = keepalived.KeepalivedInstance('MASTER', self.ha_port, 1,
['169.254.192.0/18'],
advert_int=5)
instance1.track_interfaces.append(self.ha_port)
# Configure keepalived with an IPv6 address (gw_vip) on gw_port.
vip_addr1 = keepalived.KeepalivedVipAddress(self.gw_vip, self.gw_port)
instance1.vips.append(vip_addr1)
# Configure keepalived with an IPv6 default route on gw_port.
gateway_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv6_ANY,
self.default_gw,
self.gw_port)
instance1.virtual_routes.gateway_routes = [gateway_route]
config.add_instance(instance1)
self.config = config
def start_keepalived_process(self):
# Disable process monitoring for Keepalived process.
cfg.CONF.set_override('check_child_processes_interval', 0, 'AGENT')
# Create a temp directory to store keepalived configuration.
self.config_path = tempfile.mkdtemp()
# Instantiate keepalived manager with the IPv6 configuration.
self.manager = keepalived.KeepalivedManager('router1', self.config,
namespace=self.nsname, process_monitor=self.pm,
conf_path=self.config_path)
self.manager.spawn()
def verify_ipv6_address_assignment(self, gw_dev):
process = self.manager.get_process()
agent_utils.wait_until_true(lambda: process.active)
def _gw_vip_assigned():
iface_ip = gw_dev.addr.list(ip_version=6, scope='global')
if iface_ip:
return self.gw_vip == iface_ip[0]['cidr']
agent_utils.wait_until_true(_gw_vip_assigned)
def __enter__(self):
ip_lib.IPWrapper().netns.add(self.nsname)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.pm.stop()
if self.manager:
self.manager.disable()
if self.config_path:
shutil.rmtree(self.config_path, ignore_errors=True)
ip_lib.IPWrapper().netns.delete(self.nsname)
cfg.CONF.set_override('check_child_processes_interval',
self.orig_interval, 'AGENT')
def keepalived_ipv6_supported():
"""Check if keepalived supports IPv6 functionality.
Validation is done as follows.
1. Create a namespace.
2. Create an OVS bridge with two ports (ha_port and gw_port).
3. Move the ovs ports to the namespace.
4. Spawn keepalived process inside the namespace with IPv6 configuration.
5. Verify if IPv6 address is assigned to gw_port.
6. Verify if IPv6 default route is configured by keepalived.
"""
random_str = utils.get_random_string(6)
br_name = "ka-test-" + random_str
ha_port = ha_router.HA_DEV_PREFIX + random_str
gw_port = namespaces.INTERNAL_DEV_PREFIX + random_str
gw_vip = 'fdf8:f53b:82e4::10/64'
expected_default_gw = 'fe80:f816::1'
with ovs_lib.OVSBridge(br_name) as br:
with KeepalivedIPv6Test(ha_port, gw_port, gw_vip,
expected_default_gw) as ka:
br.add_port(ha_port, ('type', 'internal'))
br.add_port(gw_port, ('type', 'internal'))
ha_dev = ip_lib.IPDevice(ha_port)
gw_dev = ip_lib.IPDevice(gw_port)
ha_dev.link.set_netns(ka.nsname)
gw_dev.link.set_netns(ka.nsname)
ha_dev.link.set_up()
gw_dev.link.set_up()
ka.configure()
ka.start_keepalived_process()
ka.verify_ipv6_address_assignment(gw_dev)
default_gw = gw_dev.route.get_gateway(ip_version=6)
if default_gw:
default_gw = default_gw['gateway']
return expected_default_gw == default_gw
def ovsdb_native_supported():
# Running the test should ensure we are configured for OVSDB native
try:
ovs = ovs_lib.BaseOVS()
ovs.get_bridges()
return True
except ImportError as ex:
LOG.error(_LE("Failed to import required modules. Ensure that the "
"python-openvswitch package is installed. Error: %s"),
ex)
except Exception as ex:
LOG.exception(six.text_type(ex))
return False
def ebtables_supported():
try:
cmd = ['ebtables', '--version']
agent_utils.execute(cmd)
return True
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking for installed ebtables. "
"Exception: %s", e)
return False
def get_minimal_dibbler_version_supported():
return MINIMUM_DIBBLER_VERSION
def dibbler_version_supported():
try:
cmd = ['dibbler-client',
'help']
out = agent_utils.execute(cmd)
return '-w' in out
except (OSError, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking minimal dibbler version. "
"Exception: %s", e)
return False
|
|
#!/usr/bin/env python2
# coding: utf8
import subprocess
import socket
import time
import sys
import paramiko
device = 'sda'
print("1. Boot to ArchLinux.iso")
ip = raw_input("2. Get IP (run `ip address` of `ifconfig`): ")
print(" Checking ip")
proc = subprocess.Popen(["ping", "-c 1", "-W 100", ip], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
res = proc.communicate()
response = proc.returncode
if response != 0:
print(" Host is down =(")
exit(1)
print("3. Run `passwd` and set root's password `root`")
print("4. Run `systemctl start sshd`")
print(" Waiting for 22 port")
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, 22))
s.close()
break
except socket.error:  # port 22 not open yet, keep polling
time.sleep(1)
print(" Connecting")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip, username='root', password='root')
### Work using ssh
def dbg_print(text):
print("--> " + text)
def ssh_exec(command, ignore_exit_code=False):
stdin, stdout, stderr = client.exec_command(command, get_pty=True)
while not stdout.closed:
line = stdout.readline()
if line == '':
break
sys.stdout.write('\033[92m>\033[0m ' + line)
# stdout_str = stdout.read()
# if stdout_str != '':
# stdout_lines = stdout_str.splitlines()
# for line in stdout_lines:
# print('\033[92m>\033[0m ' + line)
stderr_str = stderr.read()
if stderr_str != '':
stderr_lines = stderr_str.splitlines()
for line in stderr_lines:
print('\033[91m>\033[0m ' + line)
if not ignore_exit_code:
exit_status = stdout.channel.recv_exit_status()
if exit_status != 0:
print("\033[91m==> FAIL\033[0m {0}".format(exit_status))
exit(2)
dbg_print("Setting time")
ssh_exec('timedatectl set-ntp true')
dbg_print("Partitioning drive")
ssh_exec('parted /dev/{0} -s -a opt mklabel msdos mkpart primary ext2 0% 100MB set 1 boot on mkpart primary linux-swap 100MB 2048MB mkpart primary ext4 2048MB 100%'.format(device))
dbg_print("Creating filesystems")
ssh_exec('mkfs.ext2 /dev/{0}1 -F -L boot'.format(device))
ssh_exec('mkswap /dev/{0}2 -L swap'.format(device))
ssh_exec('mkfs.ext3 /dev/{0}3 -F -L root'.format(device))
dbg_print("Mounting filesystems")
ssh_exec('mount /dev/{0}3 /mnt'.format(device))
ssh_exec('mkdir /mnt/boot')
ssh_exec('mount /dev/{0}1 /mnt/boot'.format(device))
ssh_exec('swapon /dev/{0}2'.format(device))
dbg_print("Updating mirrors")
ssh_exec('mv /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.orig')
ssh_exec('echo \'Server = http://mirror.yandex.ru/archlinux/$repo/os/$arch\' > /etc/pacman.d/mirrorlist')
ssh_exec('cat /etc/pacman.d/mirrorlist.orig >> /etc/pacman.d/mirrorlist')
ssh_exec('rm /etc/pacman.d/mirrorlist.orig')
dbg_print("Killing SigLevel")
ssh_exec('pacman-key --init')
ssh_exec('sed -i \'s/^SigLevel\s*= Required DatabaseOptional/SigLevel=Never/g\' /etc/pacman.conf')
dbg_print("Installing packages")
ssh_exec('pacstrap /mnt --noconfirm base base-devel net-tools grub openssh')
dbg_print("Generating fstab")
ssh_exec('genfstab -p /mnt > /mnt/etc/fstab')
dbg_print("Localizing new system")
ssh_exec('echo "en_US.UTF-8 UTF-8" >> /mnt/etc/locale.gen')
ssh_exec('echo "ru_RU.UTF-8 UTF-8" >> /mnt/etc/locale.gen')
ssh_exec('arch-chroot /mnt locale-gen')
dbg_print("Adding hooks and modules to mkinitcpio.conf")
ssh_exec('sed -i \'s/^MODULES=""/MODULES="i915 radeon nouveau"/g\' /mnt/etc/mkinitcpio.conf')
ssh_exec('sed -i \'s/^HOOKS="base/HOOKS="base keymap/g\' /mnt/etc/mkinitcpio.conf')
dbg_print("Running mkinitcpio")
ssh_exec('arch-chroot /mnt mkinitcpio -p linux')
dbg_print("Installing grub")
ssh_exec('arch-chroot /mnt grub-install /dev/{0}'.format(device))
ssh_exec('arch-chroot /mnt grub-mkconfig -o /boot/grub/grub.cfg')
dbg_print("Enabling dhcpcd and sshd")
ssh_exec('arch-chroot /mnt systemctl enable dhcpcd')
ssh_exec('arch-chroot /mnt systemctl enable sshd')
ssh_exec('echo "PermitRootLogin yes" >> /mnt/etc/ssh/sshd_config')
dbg_print("Setting root password (`root`)")
stdin, stdout, stderr = client.exec_command('arch-chroot /mnt passwd')
stdin.write('root\n')
stdin.write('root\n')
dbg_print("Umounting drives")
ssh_exec('umount /mnt/boot')
time.sleep(2)
ssh_exec('umount /mnt')
time.sleep(2)
ssh_exec('swapoff /dev/{0}2'.format(device))
raw_input("5. Now remove installation media and press Enter to reboot")
dbg_print("Rebooting")
ssh_exec('reboot', ignore_exit_code=True)
client.close()
## Working in installed system
print("6. Login as root/root")
ip = raw_input("7. Get IP (run `ip address` of `ifconfig`): ")
print(" Checking ip")
proc = subprocess.Popen(["ping", "-c 1", "-W 100", ip], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
res = proc.communicate()
response = proc.returncode
if response != 0:
print(" Host is down =(")
exit(1)
hostname = raw_input("8. Enter hostname for new system: ")
username = raw_input("9. Enter username for new system: ")
print(" Password will be the same")
timezone = raw_input("10. Enter timezone: ")
packages = raw_input("11. Packages to install: ") # tmux ffmpeg git docker gpm nmon dstat ...
print(" Waiting for 22 port")
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, 22))
s.close()
break
except socket.error:  # port 22 not open yet, keep polling
time.sleep(1)
print(" Connecting")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip, username='root', password='root')
### Work using ssh
dbg_print("Setting hostname")
ssh_exec('hostnamectl set-hostname ' + hostname)
ssh_exec('systemctl enable systemd-resolved')
dbg_print("Setting time")
ssh_exec('timedatectl set-timezone ' + timezone)
ssh_exec('timedatectl set-ntp true')
dbg_print("Localizing system")
ssh_exec('localectl set-keymap ru')
ssh_exec('setfont cyr-sun16')
ssh_exec('localectl set-locale LANG="ru_RU.UTF-8"')
ssh_exec('export LANG=ru_RU.UTF-8')
ssh_exec('echo "FONT=cyr-sun16" > /etc/vconsole.conf')
dbg_print("Updating mkinitcpio")
ssh_exec('mkinitcpio -p linux')
dbg_print("Updating grub")
ssh_exec('grub-mkconfig -o /boot/grub/grub.cfg')
dbg_print("Getting system architecture")
stdin, stdout, stderr = client.exec_command('grep -q "^flags.*\\blm\\b" /proc/cpuinfo && echo 64 || echo 32')
architecture = stdout.read().strip()
if architecture == "":
dbg_print("\033[91mERROR! Cannot get system architecture\033[0m")
if architecture == "64":
dbg_print("Adding multilib to pacman repos list (x64 only)")
ssh_exec('echo -e \'[multilib]\\nInclude = /etc/pacman.d/mirrorlist\' >> /etc/pacman.conf')
dbg_print("Creating user")
ssh_exec('useradd -m -g users -G audio,games,lp,optical,power,scanner,storage,video,wheel -s /bin/bash ' + username)
stdin, stdout, stderr = client.exec_command('passwd ' + username)
stdin.write(username + '\n')
stdin.write(username + '\n')
dbg_print("Updating keys")
ssh_exec('pacman-key --init')
ssh_exec('pacman-key --populate archlinux')
dbg_print("Updating system")
ssh_exec('pacman --noconfirm -Syyu')
dbg_print("Installing packages")
ssh_exec('pacman --noconfirm -S yajl bash-completion ' + packages)
packages = 'xorg-server xorg-xinit xorg-server-utils mesa-libgl ' + \
'xf86-video-intel xf86-video-ati xf86-video-nouveau xf86-video-vesa ' + \
'xfce4 xfce4-goodies ' + \
'ttf-liberation ttf-dejavu opendesktop-fonts ttf-bitstream-vera ttf-arphic-ukai ttf-arphic-uming ttf-hanazono '
if architecture == "64":
packages += 'lib32-mesa-libgl '
dbg_print("Installing X")
ssh_exec('pacman --noconfirm -S ' + packages)
dbg_print("Configuring sudo")
ssh_exec('sed -i \'s/^# %wheel ALL=(ALL) ALL/%wheel ALL=(ALL) ALL/g\' /etc/sudoers')
dbg_print("Installing yaourt")
ssh_exec('mkdir /build')
ssh_exec('echo \'cd /build\' > /build/build.sh')
ssh_exec('echo \'curl -O https://aur.archlinux.org/cgit/aur.git/snapshot/$1.tar.gz\' >> /build/build.sh')
ssh_exec('echo \'tar xzf $1.tar.gz\' >> /build/build.sh')
ssh_exec('echo \'cd $1\' >> /build/build.sh')
ssh_exec('echo \'makepkg\' >> /build/build.sh')
ssh_exec('chmod +x /build/build.sh')
ssh_exec('chown nobody:nobody /build')
dbg_print(" Installing package-query")
ssh_exec('sudo -u nobody /build/build.sh package-query')
ssh_exec('pacman --noconfirm -U /build/package-query/*.pkg.tar.xz')
dbg_print(" Installing package-query")
ssh_exec('sudo -u nobody /build/build.sh yaourt')
ssh_exec('pacman --noconfirm -U /build/yaourt/*.pkg.tar.xz')
dbg_print(" Cleaning up")
ssh_exec('cd /')
ssh_exec('rm -rf /build')
dbg_print("Removing PermitRootLogin from sshd_config and adding " + username)
ssh_exec('sed -i \'s/^PermitRootLogin yes/AllowUsers ' + username + '/g\' /etc/ssh/sshd_config')
print("12. System is installed and configured. Rebooting")
ssh_exec('reboot', ignore_exit_code=True)
client.close()
|
|
import logging
from django.core.paginator import Paginator, InvalidPage
from django.contrib.auth.models import User
from django.conf.urls.defaults import url
from django.shortcuts import get_object_or_404
from django.http import Http404
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext
from haystack.utils import Highlighter
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import Authorization, DjangoAuthorization
from tastypie.constants import ALL_WITH_RELATIONS, ALL
from tastypie.resources import ModelResource
from tastypie.exceptions import NotFound, ImmediateHttpResponse
from tastypie import http
from tastypie.utils.mime import build_content_type
from tastypie.http import HttpCreated
from tastypie.utils import dict_strip_unicode_keys, trailing_slash
from core.forms import FacetedSearchForm
from builds.models import Build, Version
from projects.models import Project, ImportedFile
from projects.utils import highest_version, mkversion
from projects import tasks
from djangome import views as djangome
log = logging.getLogger(__name__)
from .utils import SearchMixin, PostAuthentication, EnhancedModelResource
class ProjectResource(ModelResource, SearchMixin):
users = fields.ToManyField('api.base.UserResource', 'users')
class Meta:
include_absolute_url = True
allowed_methods = ['get', 'post', 'put']
queryset = Project.objects.public()
authentication = PostAuthentication()
authorization = DjangoAuthorization()
excludes = ['path', 'featured']
filtering = {
"users": ALL_WITH_RELATIONS,
"slug": ALL_WITH_RELATIONS,
}
def get_object_list(self, request):
self._meta.queryset = Project.objects.public(user=request.user)
return super(ProjectResource, self).get_object_list(request)
def dehydrate(self, bundle):
bundle.data['subdomain'] = "http://%s/" % bundle.obj.subdomain
downloads = {}
downloads['htmlzip'] = bundle.obj.get_htmlzip_url()
downloads['epub'] = bundle.obj.get_epub_url()
downloads['pdf'] = bundle.obj.get_pdf_url()
downloads['manpage'] = bundle.obj.get_manpage_url()
bundle.data['downloads'] = downloads
return bundle
def post_list(self, request, **kwargs):
"""
Creates a new resource/object with the provided data.
Calls ``obj_create`` with the provided data and returns a response
with the new resource's location.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
# Force this in an ugly way, at least should do "reverse"
deserialized["users"] = ["/api/v1/user/%s/" % request.user.id,]
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized))
self.is_valid(bundle, request)
updated_bundle = self.obj_create(bundle, request=request)
return HttpCreated(location=self.get_resource_uri(updated_bundle))
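# A hedged client-side sketch of the POST flow documented in post_list() above
# (not part of the resource itself). The /api/v1/ prefix is taken from the user
# URI built in post_list(); the host, the project field names and the credentials
# are hypothetical and depend on the deployment and the Project model.
#
#   import requests
#   payload = {"name": "my-docs", "repo": "https://github.com/example/my-docs.git"}
#   resp = requests.post("https://readthedocs.example/api/v1/project/",
#                        json=payload, auth=("username", "password"))
#   assert resp.status_code == 201  # HttpCreated; new URI in resp.headers["Location"]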
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/schema/$" % self._meta.resource_name, self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/search%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_search'), name="api_get_search"),
url(r"^(?P<resource_name>%s)/(?P<slug>[a-z-_]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
class VersionResource(EnhancedModelResource):
project = fields.ForeignKey(ProjectResource, 'project', full=True)
class Meta:
allowed_methods = ['get', 'put', 'post']
always_return_data = True
queryset = Version.objects.public()
authentication = PostAuthentication()
authorization = DjangoAuthorization()
filtering = {
"project": ALL_WITH_RELATIONS,
"slug": ALL_WITH_RELATIONS,
"active": ALL,
}
#Find a better name for this before including it.
#def dehydrate(self, bundle):
#bundle.data['subdomain'] = "http://%s/en/%s/" % (bundle.obj.project.subdomain, bundle.obj.slug)
#return bundle
def get_object_list(self, request):
self._meta.queryset = Version.objects.public(user=request.user, only_active=False)
return super(VersionResource, self).get_object_list(request)
def version_compare(self, request, **kwargs):
project = get_object_or_404(Project, slug=kwargs['project_slug'])
highest = highest_version(project.versions.filter(active=True))
base = kwargs.get('base', None)
ret_val = {
'project': highest[0],
'version': highest[1],
'is_highest': True,
}
if highest[0]:
ret_val['url'] = highest[0].get_absolute_url()
ret_val['slug'] = highest[0].slug
if base and base != 'latest':
try:
ver_obj = project.versions.get(slug=base)
base_ver = mkversion(ver_obj)
if base_ver:
#This is only place where is_highest can get set.
#All error cases will be set to True, for non-
#standard versions.
ret_val['is_highest'] = base_ver >= highest[1]
else:
ret_val['is_highest'] = True
except (Version.DoesNotExist, TypeError):
ret_val['is_highest'] = True
return self.create_response(request, ret_val)
def build_version(self, request, **kwargs):
project = get_object_or_404(Project, slug=kwargs['project_slug'])
version = kwargs.get('version_slug', 'latest')
version_obj = project.versions.get(slug=version)
tasks.update_docs.delay(pk=project.pk, version_pk=version_obj.pk)
return self.create_response(request, {'building': True})
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/schema/$" % self._meta.resource_name, self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/(?P<project_slug>[a-z-_]+)/highest/(?P<base>.+)/$" % self._meta.resource_name, self.wrap_view('version_compare'), name="version_compare"),
url(r"^(?P<resource_name>%s)/(?P<project_slug>[a-z-_]+)/highest/$" % self._meta.resource_name, self.wrap_view('version_compare'), name="version_compare"),
url(r"^(?P<resource_name>%s)/(?P<project__slug>[a-z-_]+[a-z0-9-_]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_list'), name="api_version_list"),
url(r"^(?P<resource_name>%s)/(?P<project_slug>[a-z-_]+)/(?P<version_slug>[a-z0-9-_.]+)/build/$" % self._meta.resource_name, self.wrap_view('build_version'), name="api_version_build_slug"),
]
class BuildResource(EnhancedModelResource):
project = fields.ForeignKey('api.base.ProjectResource', 'project')
version = fields.ForeignKey('api.base.VersionResource', 'version')
class Meta:
include_absolute_url = True
allowed_methods = ['get', 'post', 'put']
queryset = Build.objects.all()
authentication = PostAuthentication()
authorization = DjangoAuthorization()
filtering = {
"project": ALL_WITH_RELATIONS,
"slug": ALL_WITH_RELATIONS,
"type": ALL_WITH_RELATIONS,
"state": ALL_WITH_RELATIONS,
}
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/schema/$" % self._meta.resource_name, self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/(?P<project__slug>[a-z-_]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_list'), name="build_list_detail"),
]
class FileResource(EnhancedModelResource, SearchMixin):
project = fields.ForeignKey(ProjectResource, 'project', full=True)
class Meta:
allowed_methods = ['get', 'post']
queryset = ImportedFile.objects.all()
excludes = ['md5', 'slug']
include_absolute_url = True
authentication = PostAuthentication()
authorization = DjangoAuthorization()
search_facets = ['project']
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/schema/$" % self._meta.resource_name, self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/search%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_search'), name="api_get_search"),
url(r"^(?P<resource_name>%s)/anchor%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_anchor'), name="api_get_anchor"),
]
def get_anchor(self, request, **kwargs):
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
query = request.GET.get('q', '')
redis_data = djangome.r.keys("*redirects:v4*%s*" % query)
#-2 because http:
urls = [''.join(data.split(':')[6:]) for data in redis_data if 'http://' in data]
"""
paginator = Paginator(urls, 20)
try:
page = paginator.page(int(request.GET.get('page', 1)))
except InvalidPage:
raise Http404("Sorry, no results on that page.")
objects = [result for result in page.object_list]
object_list = { 'objects': objects, }
"""
object_list = { 'objects': urls }
self.log_throttled_access(request)
return self.create_response(request, object_list)
class UserResource(ModelResource):
class Meta:
allowed_methods = ['get']
queryset = User.objects.all()
fields = ['username', 'first_name', 'last_name', 'last_login', 'id']
filtering = {
'username': 'exact',
}
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/schema/$" % self._meta.resource_name, self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/(?P<username>[a-z-_]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
|
|
# Copyright 2008-2016 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.AlignIO support for "fasta-m10" output from Bill Pearson's FASTA tools.
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
This module contains a parser for the pairwise alignments produced by Bill
Pearson's FASTA tools, for use from the Bio.AlignIO interface where it is
referred to as the "fasta-m10" file format (as we only support the machine
readable output format selected with the -m 10 command line option).
This module does NOT cover the generic "fasta" file format originally
developed as an input format to the FASTA tools. Both Bio.AlignIO and
Bio.SeqIO use the Bio.SeqIO.FastaIO module to deal with these files,
which can also be used to store a multiple sequence alignment.
"""
from __future__ import print_function
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import single_letter_alphabet, generic_dna, generic_protein
from Bio.Alphabet import Gapped
def _extract_alignment_region(alignment_seq_with_flanking, annotation):
"""Helper function for the main parsing code (PRIVATE).
To get the actual pairwise alignment sequences, we must first
translate the un-gapped sequence based coordinates into positions
in the gapped sequence (which may have a flanking region shown
using leading - characters). To date, I have never seen any
trailing flanking region shown in the m10 file, but the
following code should also cope with that.
Note that this code seems to work fine even when the "sq_offset"
entries are present as a result of using the -X command line option.
"""
align_stripped = alignment_seq_with_flanking.strip("-")
display_start = int(annotation['al_display_start'])
if int(annotation['al_start']) <= int(annotation['al_stop']):
start = int(annotation['al_start']) \
- display_start
end = int(annotation['al_stop']) \
- display_start + 1
else:
# FASTA has flipped this sequence...
start = display_start \
- int(annotation['al_start'])
end = display_start \
- int(annotation['al_stop']) + 1
end += align_stripped.count("-")
assert 0 <= start and start < end and end <= len(align_stripped), \
"Problem with sequence start/stop,\n%s[%i:%i]\n%s" \
% (alignment_seq_with_flanking, start, end, annotation)
return align_stripped[start:end]
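# A tiny worked example of the index arithmetic above (illustrative values only):
# with annotation = {'al_display_start': '1', 'al_start': '3', 'al_stop': '7'} and
# alignment_seq_with_flanking = "MTKRSGSNT" (no gap characters), we get
# start = 3 - 1 = 2 and end = 7 - 1 + 1 = 7, so the slice returned is
# align_stripped[2:7] == "KRSGS", i.e. ungapped positions 3..7 inclusive.
# Any internal "-" characters in the stripped string simply extend end by their count.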
def FastaM10Iterator(handle, alphabet=single_letter_alphabet):
"""Alignment iterator for the FASTA tool's pairwise alignment output.
This is for reading the pairwise alignments output by Bill Pearson's
FASTA program when called with the -m 10 command line option for machine
readable output. For more details about the FASTA tools, see the website
http://fasta.bioch.virginia.edu/ and the paper:
W.R. Pearson & D.J. Lipman PNAS (1988) 85:2444-2448
This class is intended to be used via the Bio.AlignIO.parse() function
by specifying the format as "fasta-m10" as shown in the following code::
from Bio import AlignIO
handle = ...
for a in AlignIO.parse(handle, "fasta-m10"):
assert len(a) == 2, "Should be pairwise!"
print("Alignment length %i" % a.get_alignment_length())
for record in a:
print("%s %s %s" % (record.seq, record.name, record.id))
Note that this is not a full blown parser for all the information
in the FASTA output - for example, most of the header and all of the
footer is ignored. Also, the alignments are not batched according to
the input queries.
Also note that there can be up to about 30 letters of flanking region
included in the raw FASTA output as contextual information. This is NOT
part of the alignment itself, and is not included in the resulting
MultipleSeqAlignment objects returned.
"""
if alphabet is None:
alphabet = single_letter_alphabet
state_PREAMBLE = -1
state_NONE = 0
state_QUERY_HEADER = 1
state_ALIGN_HEADER = 2
state_ALIGN_QUERY = 3
state_ALIGN_MATCH = 4
state_ALIGN_CONS = 5
def build_hsp():
if not query_tags and not match_tags:
raise ValueError("No data for query %r, match %r"
% (query_id, match_id))
assert query_tags, query_tags
assert match_tags, match_tags
evalue = align_tags.get("fa_expect")
q = "?" # Just for printing len(q) in debug below
m = "?" # Just for printing len(m) in debug below
tool = global_tags.get("tool", "").upper()
try:
q = _extract_alignment_region(query_seq, query_tags)
if tool in ["TFASTX"] and len(match_seq) == len(q):
m = match_seq
# Quick hack until I can work out how -, * and / characters
# and the apparent mix of aa and bp coordinates works.
else:
m = _extract_alignment_region(match_seq, match_tags)
assert len(q) == len(m)
except AssertionError as err:
print("Darn... amino acids vs nucleotide coordinates?")
print(tool)
print(query_seq)
print(query_tags)
print("%s %i" % (q, len(q)))
print(match_seq)
print(match_tags)
print("%s %i" % (m, len(m)))
print(handle.name)
raise err
assert alphabet is not None
alignment = MultipleSeqAlignment([], alphabet)
# TODO - Introduce an annotated alignment class?
# For now, store the annotation a new private property:
alignment._annotations = {}
# Want to record both the query header tags, and the alignment tags.
for key, value in header_tags.items():
alignment._annotations[key] = value
for key, value in align_tags.items():
alignment._annotations[key] = value
# Query
# =====
record = SeqRecord(Seq(q, alphabet),
id=query_id,
name="query",
description=query_descr,
annotations={"original_length": int(query_tags["sq_len"])})
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(query_tags["al_start"])
record._al_stop = int(query_tags["al_stop"])
alignment.append(record)
# TODO - What if a specific alphabet has been requested?
# TODO - Use an IUPAC alphabet?
# TODO - Can FASTA output RNA?
if alphabet == single_letter_alphabet and "sq_type" in query_tags:
if query_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif query_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
if "-" in q:
if not hasattr(record.seq.alphabet, "gap_char"):
record.seq.alphabet = Gapped(record.seq.alphabet, "-")
# Match
# =====
record = SeqRecord(Seq(m, alphabet),
id=match_id,
name="match",
description=match_descr,
annotations={"original_length": int(match_tags["sq_len"])})
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(match_tags["al_start"])
record._al_stop = int(match_tags["al_stop"])
alignment.append(record)
# This is still a very crude way of dealing with the alphabet:
if alphabet == single_letter_alphabet and "sq_type" in match_tags:
if match_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif match_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
if "-" in m:
if not hasattr(record.seq.alphabet, "gap_char"):
record.seq.alphabet = Gapped(record.seq.alphabet, "-")
return alignment
state = state_PREAMBLE
query_id = None
match_id = None
query_descr = ""
match_descr = ""
global_tags = {}
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
for line in handle:
if ">>>" in line and not line.startswith(">>>"):
if query_id and match_id:
# This happens on old FASTA output which lacked an end of
# query >>><<< marker line.
yield build_hsp()
state = state_NONE
query_descr = line[line.find(">>>") + 3:].strip()
query_id = query_descr.split(None, 1)[0]
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith("!! No "):
# e.g.
# !! No library sequences with E() < 0.5
# or on more recent versions,
# No sequences with E() < 0.05
assert state == state_NONE
assert not header_tags
assert not align_tags
assert not match_tags
assert not query_tags
assert match_id is None
assert not query_seq
assert not match_seq
assert not cons_seq
query_id = None
elif line.strip() in [">>><<<", ">>>///"]:
# End of query, possible end of all queries
if query_id and match_id:
yield build_hsp()
state = state_NONE
query_id = None
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith(">>>"):
# Should be start of a match!
assert query_id is not None
assert line[3:].split(", ", 1)[0] == query_id, line
assert match_id is None
assert not header_tags
assert not align_tags
assert not query_tags
assert not match_tags
assert not match_seq
assert not query_seq
assert not cons_seq
state = state_QUERY_HEADER
elif line.startswith(">>"):
# Should now be at start of a match alignment!
if query_id and match_id:
yield build_hsp()
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
match_descr = line[2:].strip()
match_id = match_descr.split(None, 1)[0]
state = state_ALIGN_HEADER
elif line.startswith(">--"):
# End of one HSP
assert query_id and match_id, line
yield build_hsp()
# Clean up read for next HSP
# but reuse header_tags
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
state = state_ALIGN_HEADER
elif line.startswith(">"):
if state == state_ALIGN_HEADER:
# Should be start of query alignment seq...
assert query_id is not None, line
assert match_id is not None, line
assert query_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_QUERY
elif state == state_ALIGN_QUERY:
# Should be start of match alignment seq
assert query_id is not None, line
assert match_id is not None, line
assert match_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_MATCH
elif state == state_NONE:
# Can get > as the last line of a histogram
pass
else:
assert False, "state %i got %r" % (state, line)
elif line.startswith("; al_cons"):
assert state == state_ALIGN_MATCH, line
state = state_ALIGN_CONS
# Next line(s) should be consensus seq...
elif line.startswith("; "):
if ": " in line:
key, value = [s.strip() for s in line[2:].split(": ", 1)]
else:
import warnings
# Seen in lalign36, specifically version 36.3.4 Apr, 2011
# Fixed in version 36.3.5b Oct, 2011(preload8)
warnings.warn("Missing colon in line: %r" % line)
try:
key, value = [s.strip() for s in line[2:].split(" ", 1)]
except ValueError:
raise ValueError("Bad line: %r" % line)
if state == state_QUERY_HEADER:
header_tags[key] = value
elif state == state_ALIGN_HEADER:
align_tags[key] = value
elif state == state_ALIGN_QUERY:
query_tags[key] = value
elif state == state_ALIGN_MATCH:
match_tags[key] = value
else:
assert False, "Unexpected state %r, %r" % (state, line)
elif state == state_ALIGN_QUERY:
query_seq += line.strip()
elif state == state_ALIGN_MATCH:
match_seq += line.strip()
elif state == state_ALIGN_CONS:
cons_seq += line.strip("\n")
elif state == state_PREAMBLE:
if line.startswith("#"):
global_tags["command"] = line[1:].strip()
elif line.startswith(" version "):
global_tags["version"] = line[9:].strip()
elif " compares a " in line:
global_tags["tool"] = line[:line.find(" compares a ")].strip()
elif " searches a " in line:
global_tags["tool"] = line[:line.find(" searches a ")].strip()
else:
pass
if __name__ == "__main__":
print("Running a quick self-test")
# http://emboss.sourceforge.net/docs/themes/alnformats/align.simple
simple_example = \
"""# /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
FASTA searches a protein or DNA sequence data bank
version 34.26 January 12, 2007
Please cite:
W.R. Pearson & D.J. Lipman PNAS (1988) 85:2444-2448
Query library NC_002127.faa vs NC_009649.faa library
searching NC_009649.faa library
1>>>gi|10955263|ref|NP_052604.1| plasmid mobilization [Escherichia coli O157:H7 s 107 aa - 107 aa
vs NC_009649.faa library
45119 residues in 180 sequences
Expectation_n fit: rho(ln(x))= 6.9146+/-0.0249; mu= -5.7948+/- 1.273
mean_var=53.6859+/-13.609, 0's: 0 Z-trim: 1 B-trim: 9 in 1/25
Lambda= 0.175043
FASTA (3.5 Sept 2006) function [optimized, BL50 matrix (15:-5)] ktup: 2
join: 36, opt: 24, open/ext: -10/-2, width: 16
Scan time: 0.000
The best scores are: opt bits E(180)
gi|152973457|ref|YP_001338508.1| ATPase with chape ( 931) 71 24.9 0.58
gi|152973588|ref|YP_001338639.1| F pilus assembly ( 459) 63 23.1 0.99
>>>gi|10955263|ref|NP_052604.1|, 107 aa vs NC_009649.faa library
; pg_name: /opt/fasta/fasta34
; pg_ver: 34.26
; pg_argv: /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
; pg_name: FASTA
; pg_ver: 3.5 Sept 2006
; pg_matrix: BL50 (15:-5)
; pg_open-ext: -10 -2
; pg_ktup: 2
; pg_optcut: 24
; pg_cgap: 36
; mp_extrap: 60000 180
; mp_stats: Expectation_n fit: rho(ln(x))= 6.9146+/-0.0249; mu= -5.7948+/- 1.273 mean_var=53.6859+/-13.609, 0's: 0 Z-trim: 1 B-trim: 9 in 1/25 Lambda= 0.175043
; mp_KS: -0.0000 (N=0) at 8159228
>>gi|152973457|ref|YP_001338508.1| ATPase with chaperone activity, ATP-binding subunit [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 65
; fa_init1: 43
; fa_opt: 71
; fa_z-score: 90.3
; fa_bits: 24.9
; fa_expect: 0.58
; sw_score: 71
; sw_ident: 0.250
; sw_sim: 0.574
; sw_overlap: 108
>gi|10955263| ..
; sq_len: 107
; sq_offset: 1
; sq_type: p
; al_start: 5
; al_stop: 103
; al_display_start: 1
--------------------------MTKRSGSNT-RRRAISRPVRLTAE
ED---QEIRKRAAECGKTVSGFLRAAALGKKVNSLTDDRVLKEVM-----
RLGALQKKLFIDGKRVGDREYAEVLIAITEYHRALLSRLMAD
>gi|152973457|ref|YP_001338508.1| ..
; sq_len: 931
; sq_type: p
; al_start: 96
; al_stop: 195
; al_display_start: 66
SDFFRIGDDATPVAADTDDVVDASFGEPAAAGSGAPRRRGSGLASRISEQ
SEALLQEAAKHAAEFGRS------EVDTEHLLLALADSDVVKTILGQFKI
KVDDLKRQIESEAKR-GDKPF-EGEIGVSPRVKDALSRAFVASNELGHSY
VGPEHFLIGLAEEGEGLAANLLRRYGLTPQ
>>gi|152973588|ref|YP_001338639.1| F pilus assembly protein [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 33
; fa_init1: 33
; fa_opt: 63
; fa_z-score: 86.1
; fa_bits: 23.1
; fa_expect: 0.99
; sw_score: 63
; sw_ident: 0.266
; sw_sim: 0.656
; sw_overlap: 64
>gi|10955263| ..
; sq_len: 107
; sq_offset: 1
; sq_type: p
; al_start: 32
; al_stop: 94
; al_display_start: 2
TKRSGSNTRRRAISRPVRLTAEEDQEIRKRAAECGKTVSGFLRAAALGKK
VNSLTDDRVLKEV-MRLGALQKKLFIDGKRVGDREYAEVLIAITEYHRAL
LSRLMAD
>gi|152973588|ref|YP_001338639.1| ..
; sq_len: 459
; sq_type: p
; al_start: 191
; al_stop: 248
; al_display_start: 161
VGGLFPRTQVAQQKVCQDIAGESNIFSDWAASRQGCTVGG--KMDSVQDK
ASDKDKERVMKNINIMWNALSKNRLFDG----NKELKEFIMTLTGTLIFG
ENSEITPLPARTTDQDLIRAMMEGGTAKIYHCNDSDKCLKVVADATVTIT
SNKALKSQISALLSSIQNKAVADEKLTDQE
2>>>gi|10955264|ref|NP_052605.1| hypothetical protein pOSAK1_02 [Escherichia coli O157:H7 s 126 aa - 126 aa
vs NC_009649.faa library
45119 residues in 180 sequences
Expectation_n fit: rho(ln(x))= 7.1374+/-0.0246; mu= -7.6540+/- 1.313
mean_var=51.1189+/-13.171, 0's: 0 Z-trim: 1 B-trim: 8 in 1/25
Lambda= 0.179384
FASTA (3.5 Sept 2006) function [optimized, BL50 matrix (15:-5)] ktup: 2
join: 36, opt: 24, open/ext: -10/-2, width: 16
Scan time: 0.000
The best scores are: opt bits E(180)
gi|152973462|ref|YP_001338513.1| hypothetical prot ( 101) 58 22.9 0.29
>>>gi|10955264|ref|NP_052605.1|, 126 aa vs NC_009649.faa library
; pg_name: /opt/fasta/fasta34
; pg_ver: 34.26
; pg_argv: /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
; pg_name: FASTA
; pg_ver: 3.5 Sept 2006
; pg_matrix: BL50 (15:-5)
; pg_open-ext: -10 -2
; pg_ktup: 2
; pg_optcut: 24
; pg_cgap: 36
; mp_extrap: 60000 180
; mp_stats: Expectation_n fit: rho(ln(x))= 7.1374+/-0.0246; mu= -7.6540+/- 1.313 mean_var=51.1189+/-13.171, 0's: 0 Z-trim: 1 B-trim: 8 in 1/25 Lambda= 0.179384
; mp_KS: -0.0000 (N=0) at 8159228
>>gi|152973462|ref|YP_001338513.1| hypothetical protein KPN_pKPN3p05904 [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 50
; fa_init1: 50
; fa_opt: 58
; fa_z-score: 95.8
; fa_bits: 22.9
; fa_expect: 0.29
; sw_score: 58
; sw_ident: 0.289
; sw_sim: 0.632
; sw_overlap: 38
>gi|10955264| ..
; sq_len: 126
; sq_offset: 1
; sq_type: p
; al_start: 1
; al_stop: 38
; al_display_start: 1
------------------------------MKKDKKYQIEAIKNKDKTLF
IVYATDIYSPSEFFSKIESDLKKKKSKGDVFFDLIIPNGGKKDRYVYTSF
NGEKFSSYTLNKVTKTDEYN
>gi|152973462|ref|YP_001338513.1| ..
; sq_len: 101
; sq_type: p
; al_start: 44
; al_stop: 81
; al_display_start: 14
DALLGEIQRLRKQVHQLQLERDILTKANELIKKDLGVSFLKLKNREKTLI
VDALKKKYPVAELLSVLQLARSCYFYQNVCTISMRKYA
3>>>gi|10955265|ref|NP_052606.1| hypothetical protein pOSAK1_03 [Escherichia coli O157:H7 s 346 aa - 346 aa
vs NC_009649.faa library
45119 residues in 180 sequences
Expectation_n fit: rho(ln(x))= 6.0276+/-0.0276; mu= 3.0670+/- 1.461
mean_var=37.1634+/- 8.980, 0's: 0 Z-trim: 1 B-trim: 14 in 1/25
Lambda= 0.210386
FASTA (3.5 Sept 2006) function [optimized, BL50 matrix (15:-5)] ktup: 2
join: 37, opt: 25, open/ext: -10/-2, width: 16
Scan time: 0.020
The best scores are: opt bits E(180)
gi|152973545|ref|YP_001338596.1| putative plasmid ( 242) 70 27.5 0.082
>>>gi|10955265|ref|NP_052606.1|, 346 aa vs NC_009649.faa library
; pg_name: /opt/fasta/fasta34
; pg_ver: 34.26
; pg_argv: /opt/fasta/fasta34 -Q -H -E 1 -m 10 NC_002127.faa NC_009649.faa
; pg_name: FASTA
; pg_ver: 3.5 Sept 2006
; pg_matrix: BL50 (15:-5)
; pg_open-ext: -10 -2
; pg_ktup: 2
; pg_optcut: 25
; pg_cgap: 37
; mp_extrap: 60000 180
; mp_stats: Expectation_n fit: rho(ln(x))= 6.0276+/-0.0276; mu= 3.0670+/- 1.461 mean_var=37.1634+/- 8.980, 0's: 0 Z-trim: 1 B-trim: 14 in 1/25 Lambda= 0.210386
; mp_KS: -0.0000 (N=0) at 8159228
>>gi|152973545|ref|YP_001338596.1| putative plasmid SOS inhibition protein A [Klebsiella pneumoniae subsp. pneumoniae MGH 78578]
; fa_frame: f
; fa_initn: 52
; fa_init1: 52
; fa_opt: 70
; fa_z-score: 105.5
; fa_bits: 27.5
; fa_expect: 0.082
; sw_score: 70
; sw_ident: 0.279
; sw_sim: 0.651
; sw_overlap: 43
>gi|10955265| ..
; sq_len: 346
; sq_offset: 1
; sq_type: p
; al_start: 197
; al_stop: 238
; al_display_start: 167
DFMCSILNMKEIVEQKNKEFNVDIKKETIESELHSKLPKSIDKIHEDIKK
QLSC-SLIMKKIDVEMEDYSTYCFSALRAIEGFIYQILNDVCNPSSSKNL
GEYFTENKPKYIIREIHQET
>gi|152973545|ref|YP_001338596.1| ..
; sq_len: 242
; sq_type: p
; al_start: 52
; al_stop: 94
; al_display_start: 22
IMTVEEARQRGARLPSMPHVRTFLRLLTGCSRINSDVARRIPGIHRDPKD
RLSSLKQVEEALDMLISSHGEYCPLPLTMDVQAENFPEVLHTRTVRRLKR
QDFAFTRKMRREARQVEQSW
>>><<<
579 residues in 3 query sequences
45119 residues in 180 library sequences
Scomplib [34.26]
start: Tue May 20 16:38:45 2008 done: Tue May 20 16:38:45 2008
Total Scan time: 0.020 Total Display time: 0.010
Function used was FASTA [version 34.26 January 12, 2007]
"""
from Bio._py3k import StringIO
alignments = list(FastaM10Iterator(StringIO(simple_example)))
assert len(alignments) == 4, len(alignments)
assert len(alignments[0]) == 2
for a in alignments:
print("Alignment %i sequences of length %i"
% (len(a), a.get_alignment_length()))
for r in a:
print("%s %s %i" % (r.seq, r.id, r.annotations["original_length"]))
# print(a.annotations)
print("Done")
import os
path = "../../Tests/Fasta/"
files = sorted(f for f in os.listdir(path) if os.path.splitext(f)[-1] == ".m10")
for filename in files:
if os.path.splitext(filename)[-1] == ".m10":
print("")
print(filename)
print("=" * len(filename))
for i, a in enumerate(FastaM10Iterator(open(os.path.join(path, filename)))):
print("#%i, %s" % (i + 1, a))
for r in a:
if "-" in r.seq:
assert r.seq.alphabet.gap_char == "-"
else:
assert not hasattr(r.seq.alphabet, "gap_char")
|
|
# -*- coding: UTF-8 -*-
import threading
import MySQLdb
import MySQLdb.cursors
import Queue
import tornado.ioloop
import tornado.gen
import tornado.concurrent
class MySQLConnection(object):
class Worker(threading.Thread):
def __init__(self, conn, *args, **kwargs):
"""
Initialize a new Worker thread.
:return: None
"""
self.conn = conn
self.db = None
self.in_tx = False
super(MySQLConnection.Worker, self).__init__(*args, **kwargs)
def connect(self):
"""
(re)Connect to a MySQL database.
:return: None
"""
if self.db is not None:
self.disconnect()
            self.db = MySQLdb.connect(
                host=self.conn.host, port=self.conn.port, db=self.conn.db,
                user=self.conn.user, passwd=self.conn.pwd, use_unicode=True,
                charset='utf8', cursorclass=MySQLdb.cursors.DictCursor)
self.db.autocommit(self.conn.auto_commit)
def disconnect(self):
"""
Close a MySQL connection.
:return: None
"""
if self.db:
try:
self.db.close()
finally:
self.db = None
def run(self):
"""
Working thread main loop. Pick up jobs from the queue and execute them.
:return: None
"""
# First thing, create a MySQL connection for this thread
self.connect()
            # Start the thread's work loop
while self.conn.running:
result = None
error = None
cursor = None
try:
# Get next task from queue
task = self.conn.queue.get(True)
# Handle special abort command
if task['command'] == 'abort':
self.conn.queue.put(task)
break
# Ignore Transactions which are not this thread's
tx_id = task.get('tx_id')
if tx_id is not None:
if tx_id != self.name:
# Put task request back into queue and wait again
self.conn.queue.put(task)
continue
# Handle transactions
if task['command'] == '*begin-tx*':
if self.in_tx:
# Already attending a transaction, return request to queue
self.conn.queue.put(task)
continue
else:
# Signal this Thread will handle the Transaction!
self.in_tx = True
result = self.name
elif task['command'] == '*end-tx*':
if self.in_tx and task['tx_id'] == self.name:
# This is our signal to stop attending this transaction
self.in_tx = False
else:
# Not attending a transaction or it's not our transaction. Either way, ignore request
self.conn.queue.put(task)
continue
else:
# Get a DB cursor and execute query (at most 3 times!)
retries = 3
while retries > 0:
try:
cursor = self.db.cursor()
rows_affected = cursor.execute(task['query'], task.get('args'))
error = None
break
except (AttributeError, MySQLdb.OperationalError) as e:
retries -= 1
error = e
cursor = None
self.connect()
except Exception as e:
if cursor is not None:
cursor.close()
error = e
break
if error is None:
# Determine result
if task['command'] == 'select':
# for a SELECT, we want the resultset
result = list(cursor.fetchall())
if len(result) == 0:
result = None
elif task['command'] == 'insert':
# for an INSERT, we want the new ID
result = cursor.lastrowid
else:
# for everything else, we'll be fine with rows_affected
result = rows_affected
else:
if retries == 0:
raise Exception('Failed 3 reconnection attempts to MySQL server: {0}'.format(e))
except Exception as e:
error = e
finally:
# Make sure we close the DB cursor!
if cursor is not None:
cursor.close()
# Send result to the query's request-ee
self.conn._send_result(task, result, error)
# No more tasks. Close connection
self.disconnect()
class Transaction():
def __init__(self, conn):
"""
            Initialize a Transaction helper object.
:param conn: Reference to the MySQL connection instance.
"""
self.conn = conn
self.tx_id = None
@tornado.gen.coroutine
def query(self, query, args=None):
"""
Execute a query
"""
ret = yield self.conn.query(query, args, tx_id=self.tx_id)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def begin(self):
"""
Start a new Transaction.
:return: None
"""
if self.tx_id is None:
self.tx_id = yield self.conn.query('*begin-tx*')
yield self.conn.query('START TRANSACTION WITH CONSISTENT SNAPSHOT', tx_id=self.tx_id)
@tornado.gen.coroutine
def commit(self):
"""
Commit transaction.
:return: None
"""
if self.tx_id is not None:
try:
yield self.conn.query('COMMIT', tx_id=self.tx_id)
finally:
yield self.conn.query('*end-tx*', tx_id=self.tx_id)
self.tx_id = None
@tornado.gen.coroutine
def rollback(self):
"""
Rollback transaction.
:return: None
"""
if self.tx_id is not None:
try:
yield self.conn.query('ROLLBACK', tx_id=self.tx_id)
finally:
yield self.conn.query('*end-tx*', tx_id=self.tx_id)
self.tx_id = None
def __init__(self, connection_string, worker_pool_size=10, auto_commit=True):
"""
Initialize the Tornado Async MySQL wrapper
:param connection_string: Connection string for establishing a server connection.
:param worker_pool_size: Number of worker threads to initialize.
:param auto_commit: Whether or not to auto commit statements on execution.
:return: None
"""
self.auto_commit = auto_commit
self._parse_connection_string(connection_string)
self.running = True
self.queue = Queue.Queue()
self.workers = []
for i in xrange(worker_pool_size):
w = MySQLConnection.Worker(self, name=str(i))
w.start()
self.workers.append(w)
def _parse_connection_string(self, connection_string):
"""
Parse the given Connection string. Only recognized parts will be used. All other parts will be ignored.
:param connection_string: MySQL connection string.
:return: None
"""
self.host = '127.0.0.1'
self.port = 3306
self.db = None
self.user = None
self.pwd = None
for part in connection_string.split(';'):
part = part.strip()
if part != '':
k, v = part.split('=')
k = k.lower()
if k == 'server':
self.host = v.strip()
elif k == 'port':
self.port = int(v.strip())
elif k == 'database':
self.db = v.strip()
elif k == 'uid':
self.user = v.strip()
elif k == 'pwd':
self.pwd = v.strip()
def _send_result(self, task, result, error):
"""
        Send a query result back to its requester.
:return: None
"""
if error is None:
task['future'].set_result(result)
else:
task['future'].set_exception(error)
def close(self):
"""
Shutdown this connection.
:return: None
"""
self.running = False
self.queue.put({'command':'abort'})
        for w in self.workers:
            w.join()
def query(self, sql, args=None, tx_id=None):
"""
Perform a DB query.
:param sql: SQL statement to execute.
:param args: Optional arguments.
        :param tx_id: Transaction ID; for internal use only.
:return: Future instance
"""
future = tornado.concurrent.TracebackFuture()
        self.queue.put({'query': sql, 'command': sql.split(None, 1)[0].lower(),
                        'future': future, 'tx_id': tx_id, 'args': args})
return future
def transaction(self):
"""
Return a new Transaction helper object for performing queries within a Transaction scope.
:return: Transaction instance.
"""
return MySQLConnection.Transaction(self)
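# Illustrative usage sketch (not part of the original module): drives the
# wrapper above from a Tornado coroutine. The connection-string keys follow
# _parse_connection_string(); the host, database, credentials and query below
# are placeholders and a reachable MySQL server is assumed, so treat this as a
# hedged example rather than a tested entry point.
if __name__ == '__main__':
    @tornado.gen.coroutine
    def _demo():
        conn = MySQLConnection(
            'server=127.0.0.1;port=3306;database=test;uid=root;pwd=secret',
            worker_pool_size=2)
        try:
            # Plain query: returns a list of dict rows, or None if empty
            rows = yield conn.query('SELECT 1 AS one')
            print(rows)
            # Transaction scope: begin/commit pin the work to a single worker
            tx = conn.transaction()
            yield tx.begin()
            try:
                yield tx.query('SELECT 1 AS one')
                yield tx.commit()
            except Exception:
                yield tx.rollback()
                raise
        finally:
            conn.close()
    tornado.ioloop.IOLoop.current().run_sync(_demo)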
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import abc
import contextlib
import copy
import hashlib
import os
import threading
from oslo_utils import reflection
from oslo_utils import strutils
from requests import Response
import six
from novaclient import exceptions
from novaclient import utils
def getid(obj):
"""Get object's ID or object.
    Abstracts the common pattern of allowing either an object or an object's ID
as a parameter when dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param args: args to be passed to every hook function
:param kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
class RequestIdMixin(object):
"""Wrapper class to expose x-openstack-request-id to the caller.
"""
def request_ids_setup(self):
self.x_openstack_request_ids = []
@property
def request_ids(self):
return self.x_openstack_request_ids
def append_request_ids(self, resp):
"""Add request_ids as an attribute to the object
:param resp: Response object or list of Response objects
"""
if isinstance(resp, list):
# Add list of request_ids if response is of type list.
for resp_obj in resp:
self._append_request_id(resp_obj)
elif resp is not None:
# Add request_ids if response contains single object.
self._append_request_id(resp)
def _append_request_id(self, resp):
if isinstance(resp, Response):
# Extract 'x-openstack-request-id' from headers if
# response is a Response object.
request_id = (resp.headers.get('x-openstack-request-id') or
resp.headers.get('x-compute-request-id'))
else:
# If resp is of type string or None.
request_id = resp
if request_id not in self.x_openstack_request_ids:
self.x_openstack_request_ids.append(request_id)
class Resource(RequestIdMixin):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False, resp=None):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
:param resp: Response or list of Response objects
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
self.request_ids_setup()
self.append_request_ids(resp)
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and
k not in ['manager', 'x_openstack_request_ids'])
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def api_version(self):
return self.manager.api_version
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion.
"""
if self.HUMAN_ID:
name = getattr(self, self.NAME_ATTR, None)
if name is not None:
return strutils.to_slug(name)
return None
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
        Some clients, such as novaclient, have the option to lazy-load the
        details, which can be loaded with this function.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
# The 'request_ids' attribute has been added,
# so store the request id to it instead of _info
self.append_request_ids(new.request_ids)
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def __ne__(self, other):
return not self.__eq__(other)
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
class Manager(HookableMixin):
"""Manager for API service.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
cache_lock = threading.RLock()
def __init__(self, api):
self.api = api
@property
def client(self):
return self.api.client
@property
def api_version(self):
return self.api.api_version
def _list(self, url, response_key, obj_class=None, body=None):
if body:
resp, body = self.api.client.post(url, body=body)
else:
resp, body = self.api.client.get(url)
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
if isinstance(data, dict):
try:
data = data['values']
except KeyError:
pass
with self.completion_cache('human_id', obj_class, mode="w"):
with self.completion_cache('uuid', obj_class, mode="w"):
items = [obj_class(self, res, loaded=True)
for res in data if res]
return ListWithMeta(items, resp)
@contextlib.contextmanager
def alternate_service_type(self, default, allowed_types=()):
original_service_type = self.api.client.service_type
if original_service_type in allowed_types:
yield
else:
self.api.client.service_type = default
try:
yield
finally:
self.api.client.service_type = original_service_type
@contextlib.contextmanager
def completion_cache(self, cache_type, obj_class, mode):
"""The completion cache for bash autocompletion.
        The completion cache stores items that can be used for bash
autocompletion, like UUIDs or human-friendly IDs.
A resource listing will clear and repopulate the cache.
A resource create will append to the cache.
Delete is not handled because listings are assumed to be performed
often enough to keep the cache reasonably up-to-date.
"""
# NOTE(wryan): This lock protects read and write access to the
# completion caches
with self.cache_lock:
base_dir = utils.env('NOVACLIENT_UUID_CACHE_DIR',
default="~/.novaclient")
# NOTE(sirp): Keep separate UUID caches for each username +
# endpoint pair
username = utils.env('OS_USERNAME', 'NOVA_USERNAME')
url = utils.env('OS_URL', 'NOVA_URL')
uniqifier = hashlib.md5(username.encode('utf-8') +
url.encode('utf-8')).hexdigest()
cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier))
try:
os.makedirs(cache_dir, 0o755)
except OSError:
# NOTE(kiall): This is typically either permission denied while
# attempting to create the directory, or the
# directory already exists. Either way, don't
# fail.
pass
resource = obj_class.__name__.lower()
filename = "%s-%s-cache" % (resource, cache_type.replace('_', '-'))
path = os.path.join(cache_dir, filename)
cache_attr = "_%s_cache" % cache_type
try:
setattr(self, cache_attr, open(path, mode))
except IOError:
# NOTE(kiall): This is typically a permission denied while
# attempting to write the cache file.
pass
try:
yield
finally:
cache = getattr(self, cache_attr, None)
if cache:
cache.close()
delattr(self, cache_attr)
def write_to_completion_cache(self, cache_type, val):
cache = getattr(self, "_%s_cache" % cache_type, None)
if cache:
cache.write("%s\n" % val)
def _get(self, url, response_key):
resp, body = self.api.client.get(url)
if response_key is not None:
content = body[response_key]
else:
content = body
return self.resource_class(self, content, loaded=True,
resp=resp)
def _create(self, url, body, response_key, return_raw=False, **kwargs):
self.run_hooks('modify_body_for_create', body, **kwargs)
resp, body = self.api.client.post(url, body=body)
if return_raw:
return self.convert_into_with_meta(body[response_key], resp)
with self.completion_cache('human_id', self.resource_class, mode="a"):
with self.completion_cache('uuid', self.resource_class, mode="a"):
return self.resource_class(self, body[response_key], resp=resp)
def _delete(self, url):
resp, body = self.api.client.delete(url)
return self.convert_into_with_meta(body, resp)
def _update(self, url, body, response_key=None, **kwargs):
self.run_hooks('modify_body_for_update', body, **kwargs)
resp, body = self.api.client.put(url, body=body)
if body:
if response_key:
return self.resource_class(self, body[response_key], resp=resp)
else:
return self.resource_class(self, body, resp=resp)
else:
return StrWithMeta(body, resp)
def convert_into_with_meta(self, item, resp):
if isinstance(item, six.string_types):
if six.PY2 and isinstance(item, six.text_type):
return UnicodeWithMeta(item, resp)
else:
return StrWithMeta(item, resp)
elif isinstance(item, six.binary_type):
return BytesWithMeta(item, resp)
elif isinstance(item, list):
return ListWithMeta(item, resp)
elif isinstance(item, tuple):
return TupleWithMeta(item, resp)
elif item is None:
return TupleWithMeta((), resp)
else:
return DictWithMeta(item, resp)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(Manager):
"""Like a `Manager`, but with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``."""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
raise exceptions.NotFound(404, msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch
else:
matches[0].append_request_ids(matches.request_ids)
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``."""
found = ListWithMeta([], None)
searches = kwargs.items()
detailed = True
list_kwargs = {}
list_argspec = reflection.get_callable_args(self.list)
if 'detailed' in list_argspec:
detailed = ("human_id" not in kwargs and
"name" not in kwargs and
"display_name" not in kwargs)
list_kwargs['detailed'] = detailed
if 'is_public' in list_argspec and 'is_public' in kwargs:
is_public = kwargs['is_public']
list_kwargs['is_public'] = is_public
if is_public is None:
tmp_kwargs = kwargs.copy()
del tmp_kwargs['is_public']
searches = tmp_kwargs.items()
if 'search_opts' in list_argspec:
# pass search_opts in to do server side based filtering.
# TODO(jogo) not all search_opts support regex, find way to
# identify when to use regex and when to use string matching.
# volumes does not support regex while servers does. So when
# doing findall on servers some client side filtering is still
# needed.
if "human_id" in kwargs:
list_kwargs['search_opts'] = {"name": kwargs["human_id"]}
elif "name" in kwargs:
list_kwargs['search_opts'] = {"name": kwargs["name"]}
elif "display_name" in kwargs:
list_kwargs['search_opts'] = {"name": kwargs["display_name"]}
if "all_tenants" in kwargs:
all_tenants = kwargs['all_tenants']
list_kwargs['search_opts']['all_tenants'] = all_tenants
searches = [(k, v) for k, v in searches if k != 'all_tenants']
if "deleted" in kwargs:
deleted = kwargs['deleted']
list_kwargs['search_opts']['deleted'] = deleted
searches = [(k, v) for k, v in searches if k != 'deleted']
listing = self.list(**list_kwargs)
found.append_request_ids(listing.request_ids)
for obj in listing:
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
if detailed:
found.append(obj)
else:
detail = self.get(obj.id)
found.append(detail)
found.append_request_ids(detail.request_ids)
except AttributeError:
continue
return found
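    # Illustrative note (not from the original source): for a manager whose
    # list() accepts ``detailed`` and ``search_opts`` (the server manager
    # does), findall(name='vm1', all_tenants=1) translates to roughly
    # list(detailed=False, search_opts={'name': 'vm1', 'all_tenants': 1}),
    # after which the loop above still filters client-side on name == 'vm1'.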
class BootingManagerWithFind(ManagerWithFind):
"""Like a `ManagerWithFind`, but has the ability to boot servers."""
def _parse_block_device_mapping(self, block_device_mapping):
"""Parses legacy block device mapping."""
# FIXME(andreykurilin): make it work with block device mapping v2
bdm = []
for device_name, mapping in six.iteritems(block_device_mapping):
#
# The mapping is in the format:
# <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
#
bdm_dict = {'device_name': device_name}
mapping_parts = mapping.split(':')
source_id = mapping_parts[0]
if len(mapping_parts) == 1:
bdm_dict['volume_id'] = source_id
elif len(mapping_parts) > 1:
source_type = mapping_parts[1]
if source_type.startswith('snap'):
bdm_dict['snapshot_id'] = source_id
else:
bdm_dict['volume_id'] = source_id
if len(mapping_parts) > 2 and mapping_parts[2]:
bdm_dict['volume_size'] = str(int(mapping_parts[2]))
if len(mapping_parts) > 3:
bdm_dict['delete_on_termination'] = mapping_parts[3]
bdm.append(bdm_dict)
return bdm
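    # Worked example (illustrative, not from the original source): with
    # block_device_mapping={'vda': '1234:snap:10:True'}, the loop above
    # returns [{'device_name': 'vda', 'snapshot_id': '1234',
    #           'volume_size': '10', 'delete_on_termination': 'True'}].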
class ListWithMeta(list, RequestIdMixin):
def __init__(self, values, resp):
super(ListWithMeta, self).__init__(values)
self.request_ids_setup()
self.append_request_ids(resp)
class DictWithMeta(dict, RequestIdMixin):
def __init__(self, values, resp):
super(DictWithMeta, self).__init__(values)
self.request_ids_setup()
self.append_request_ids(resp)
class TupleWithMeta(tuple, RequestIdMixin):
def __new__(cls, values, resp):
return super(TupleWithMeta, cls).__new__(cls, values)
def __init__(self, values, resp):
self.request_ids_setup()
self.append_request_ids(resp)
class StrWithMeta(str, RequestIdMixin):
def __new__(cls, value, resp):
return super(StrWithMeta, cls).__new__(cls, value)
def __init__(self, values, resp):
self.request_ids_setup()
self.append_request_ids(resp)
class BytesWithMeta(six.binary_type, RequestIdMixin):
def __new__(cls, value, resp):
return super(BytesWithMeta, cls).__new__(cls, value)
def __init__(self, values, resp):
self.request_ids_setup()
self.append_request_ids(resp)
if six.PY2:
class UnicodeWithMeta(six.text_type, RequestIdMixin):
def __new__(cls, value, resp):
return super(UnicodeWithMeta, cls).__new__(cls, value)
def __init__(self, values, resp):
self.request_ids_setup()
self.append_request_ids(resp)
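# Illustrative sketch (not part of novaclient): exercises getid() and the
# HookableMixin hook registry defined above without touching any API client.
# The _DemoManager and _FakeServer names are made up for this example.
if __name__ == '__main__':
    class _DemoManager(HookableMixin):
        pass

    def _log_create_body(body, **kwargs):
        print('modify_body_for_create called with %s' % (body,))

    _DemoManager.add_hook('modify_body_for_create', _log_create_body)
    _DemoManager.run_hooks('modify_body_for_create', {'server': {'name': 'demo'}})

    class _FakeServer(object):
        id = 'abc123'

    # getid() accepts either an object with an ``id`` attribute or a bare ID.
    print(getid(_FakeServer()))
    print(getid('abc123'))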
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
import pytest
PY3 = sys.version_info[0] >= 3
pytest.skip("PY3")
pytest.importorskip('sphinx') # skips these tests if sphinx not present
class FakeConfig(object):
"""
Mocks up a sphinx configuration setting construct for automodapi tests
"""
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class FakeApp(object):
"""
Mocks up a `sphinx.application.Application` object for automodapi tests
"""
# Some default config values
_defaults = {
'automodapi_toctreedirnm': 'api',
'automodapi_writereprocessed': False
}
def __init__(self, **configs):
config = self._defaults.copy()
config.update(configs)
self.config = FakeConfig(**config)
        self.info_messages = []
        self.warnings = []
    def info(self, msg, loc):
        self.info_messages.append((msg, loc))
def warn(self, msg, loc):
self.warnings.append((msg, loc))
am_replacer_str = """
This comes before
.. automodapi:: astropy_helpers.sphinx.ext.tests.test_automodapi
{options}
This comes after
"""
am_replacer_basic_expected = """
This comes before
astropy_helpers.sphinx.ext.tests.test_automodapi Module
-------------------------------------------------------
.. automodule:: astropy_helpers.sphinx.ext.tests.test_automodapi
Functions
^^^^^^^^^
.. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi
:functions-only:
:toctree: api/
Classes
^^^^^^^
.. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi
:classes-only:
:toctree: api/
Class Inheritance Diagram
^^^^^^^^^^^^^^^^^^^^^^^^^
.. automod-diagram:: astropy_helpers.sphinx.ext.tests.test_automodapi
:private-bases:
:parts: 1
{empty}
This comes after
""".format(empty='').replace('/', os.sep)
# the .format is necessary for editors that remove empty-line whitespace
def test_am_replacer_basic():
"""
Tests replacing an ".. automodapi::" with the automodapi no-option
template
"""
from ..automodapi import automodapi_replace
fakeapp = FakeApp()
result = automodapi_replace(am_replacer_str.format(options=''), fakeapp)
assert result == am_replacer_basic_expected
am_replacer_noinh_expected = """
This comes before
astropy_helpers.sphinx.ext.tests.test_automodapi Module
-------------------------------------------------------
.. automodule:: astropy_helpers.sphinx.ext.tests.test_automodapi
Functions
^^^^^^^^^
.. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi
:functions-only:
:toctree: api/
Classes
^^^^^^^
.. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi
:classes-only:
:toctree: api/
This comes after
""".format(empty='').replace('/', os.sep)
def test_am_replacer_noinh():
"""
    Tests replacing an ".. automodapi::" with the ":no-inheritance-diagram:"
    option.
"""
from ..automodapi import automodapi_replace
fakeapp = FakeApp()
ops = ['', ':no-inheritance-diagram:']
ostr = '\n '.join(ops)
result = automodapi_replace(am_replacer_str.format(options=ostr), fakeapp)
assert result == am_replacer_noinh_expected
am_replacer_titleandhdrs_expected = """
This comes before
astropy_helpers.sphinx.ext.tests.test_automodapi Module
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
.. automodule:: astropy_helpers.sphinx.ext.tests.test_automodapi
Functions
*********
.. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi
:functions-only:
:toctree: api/
Classes
*******
.. automodsumm:: astropy_helpers.sphinx.ext.tests.test_automodapi
:classes-only:
:toctree: api/
Class Inheritance Diagram
*************************
.. automod-diagram:: astropy_helpers.sphinx.ext.tests.test_automodapi
:private-bases:
:parts: 1
{empty}
This comes after
""".format(empty='').replace('/', os.sep)
def test_am_replacer_titleandhdrs():
"""
Tests replacing an ".. automodapi::" entry with title-setting and header
character options.
"""
from ..automodapi import automodapi_replace
fakeapp = FakeApp()
ops = ['', ':title: A new title', ':headings: &*']
ostr = '\n '.join(ops)
result = automodapi_replace(am_replacer_str.format(options=ostr), fakeapp)
assert result == am_replacer_titleandhdrs_expected
am_replacer_nomain_str = """
This comes before
.. automodapi:: astropy_helpers.sphinx.ext.automodapi
:no-main-docstr:
This comes after
"""
am_replacer_nomain_expected = """
This comes before
astropy_helpers.sphinx.ext.automodapi Module
--------------------------------------------
Functions
^^^^^^^^^
.. automodsumm:: astropy_helpers.sphinx.ext.automodapi
:functions-only:
:toctree: api/
This comes after
""".format(empty='').replace('/', os.sep)
def test_am_replacer_nomain():
"""
    Tests replacing an ".. automodapi::" with the ":no-main-docstr:" option.
"""
from ..automodapi import automodapi_replace
fakeapp = FakeApp()
result = automodapi_replace(am_replacer_nomain_str, fakeapp)
assert result == am_replacer_nomain_expected
am_replacer_skip_str = """
This comes before
.. automodapi:: astropy_helpers.sphinx.ext.automodapi
:skip: something1
:skip: something2
This comes after
"""
am_replacer_skip_expected = """
This comes before
astropy_helpers.sphinx.ext.automodapi Module
--------------------------------------------
.. automodule:: astropy_helpers.sphinx.ext.automodapi
Functions
^^^^^^^^^
.. automodsumm:: astropy_helpers.sphinx.ext.automodapi
:functions-only:
:toctree: api/
:skip: something1,something2
This comes after
""".format(empty='').replace('/', os.sep)
def test_am_replacer_skip():
"""
    Tests using the ":skip:" option in an ".. automodapi::".
"""
from ..automodapi import automodapi_replace
fakeapp = FakeApp()
result = automodapi_replace(am_replacer_skip_str, fakeapp)
assert result == am_replacer_skip_expected
am_replacer_invalidop_str = """
This comes before
.. automodapi:: astropy_helpers.sphinx.ext.automodapi
:invalid-option:
This comes after
"""
def test_am_replacer_invalidop():
"""
Tests that a sphinx warning is produced with an invalid option.
"""
from ..automodapi import automodapi_replace
fakeapp = FakeApp()
automodapi_replace(am_replacer_invalidop_str, fakeapp)
expected_warnings = [('Found additional options invalid-option in '
'automodapi.', None)]
assert fakeapp.warnings == expected_warnings
|
|
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Copyright 2017 Paul T. Grogan, Stevens Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
The L{ofspy.context} package contains classes related to the environment.
"""
import random
import logging
from ..simulation import Entity
class Context(Entity):
"""
A L{Context} contains the complete simulation state.
"""
def __init__(self, locations=None, events=None, federations=None, seed=0):
"""
@param locations: the locations in this context
@type locations: L{list}
@param events: the events in this context
@type events: L{list}
@param federations: the federations in this context
@type federations: L{list}
@param seed: the seed for stochastic events
@type seed: L{int}
"""
Entity.__init__(self, 'context')
if locations is None:
self.locations = []
else:
self.locations = locations
if events is None:
self.events = []
else:
self.events = events
self.currentEvents = []
self.futureEvents = []
self.pastEvents = []
if federations is None:
self.federations = []
else:
self.federations = federations
self.seed = seed
self.sectors = frozenset(l.sector for l in self.locations)
self.initTime = 0
self.maxTime = 0
self.time = 0
self._nextTime = 0
def getNumSectors(self):
"""
Gets the number of sectors in this context.
@return: L{int}
"""
return len(self.sectors)
def propagate(self, location, duration):
"""
Propagates a location over a specified duration.
@param location: the location to propagate
@type location: L{Location}
@param duration: the duration for which to propagate
@type duration: L{float}
@return: L{Location}
"""
if location is not None and location.isOrbit():
distance = 0
if location.altitude == "LEO":
distance = 2*duration
elif location.altitude == "MEO":
distance = 1*duration
elif location.altitude == "GEO":
distance = 0*duration
path = [l for l in self.locations
if l.isOrbit()
and l.altitude == location.altitude]
return next((p for p in path
if p.sector == (location.sector
+ distance) % len(path)), None)
return location
def getElementOwner(self, element):
"""
Gets the element owner in this context.
@param element: the element
@type element: L{Element}
        @return: L{Federate}
"""
return next((federate for federation in self.federations
for federate in federation.federates
if element in federate.elements), None)
def getContractOwner(self, contract):
"""
Gets the contract owner in this context.
@param contract: the contract
@type contract: L{Contract}
        @return: L{Federate}
"""
return next((federate for federation in self.federations
for federate in federation.federates
if contract in federate.contracts), None)
def getDemandOwner(self, demand):
"""
Gets the demand owner in this context.
@param demand: the demand
@type demand: L{Demand}
        @return: L{Federate}
"""
return next((federate for federation in self.federations
for federate in federation.federates
if any(contract.demand is demand
for contract in federate.contracts)), None)
def getData(self, contract):
"""
Gets the data for a contract.
@param contract: the contract
@type contract: L{Contract}
@return: L{Data}
"""
return next((data for federation in self.federations
for federate in federation.federates
for element in federate.elements
for module in element.modules
for data in module.data
if data.contract is contract), None)
def getContract(self, demand):
"""
Gets the contract for a demand.
@param demand: the demand
@type demand: L{Demand}
@return: L{Contract}
"""
return next((contract for federation in self.federations
for federate in federation.federates
for contract in federate.contracts
if contract.demand is demand), None)
def getDataLocation(self, contract):
"""
Gets the location of data for a contract.
@param contract: the contract
@type contract: L{Contract}
@return: L{Location}
"""
return next((element.location
for federation in self.federations
for federate in federation.federates
for element in federate.elements
if any(any(d.contract is contract
for d in module.data)
for module in element.modules)), None)
def getDataElement(self, contract):
"""
Gets the element containing data for a contract.
@param contract: the contract
@type contract: L{Contract}
@return: L{Element}
"""
return next((element for federation in self.federations
for federate in federation.federates
for element in federate.elements
if any(data.contract is contract
for module in element.modules
for data in module.data)), None)
def init(self, sim):
"""
Initializes this context in a simulation.
@param sim: the simulator
@type sim: L{Simulator}
"""
super(Context, self).init(sim)
self.masterStream = random.Random(self.seed)
self.shuffleStream = random.Random(self.masterStream.random())
self.orderStream = random.Random(self.masterStream.random())
self.rollStreams = {}
for federate in [federate for federation in self.federations
for federate in federation.federates]:
self.rollStreams[federate.name] = random.Random(self.masterStream.random())
self.currentEvents = []
self.pastEvents = []
self.futureEvents = self.events[:]
random.shuffle(self.futureEvents, random=self.shuffleStream.random)
self.time = sim.initTime
self.trigger('init', self, self.time)
self.initTime = sim.initTime
self.maxTime = sim.maxTime
for federation in self.federations:
federation.init(sim)
def tick(self, sim):
"""
Ticks this context in a simulation.
@param sim: the simulator
@type sim: L{Simulator}
"""
super(Context, self).tick(sim)
for federation in self.federations:
federation.tick(sim)
self._nextTime = self.time + sim.timeStep
def revealEvents(self):
"""
Reveal events.
"""
# reveal and resolve new events in each sector
while len(self.currentEvents) > 0:
self.pastEvents.append(self.currentEvents.pop())
for sector in self.sectors:
event = self.futureEvents.pop()
event.sector = sector
self.currentEvents.append(event)
if any(element for federation in self.federations
for federate in federation.federates
for element in federate.elements
if element.isSpace()
and element.location is not None
and element.location.sector == sector):
logging.debug('Sector {0} event: {1}'
.format(sector+1, event.name))
# shuffle past events if there are no more future events
if len(self.futureEvents) < 1:
logging.info('Shuffling events...')
random.shuffle(self.pastEvents, self.shuffleStream.random)
while len(self.pastEvents) > 0:
self.futureEvents.append(self.pastEvents.pop())
self.trigger('reveal', self, event)
def resolveDisturbances(self):
for event in [e for e in self.currentEvents if e.isDisturbance()]:
# resolve disturbances
for federate in [federate for federation in self.federations
for federate in federation.federates]:
rollStream = self.rollStreams[federate.name]
for element in federate.elements:
if (element.isSpace()
and element.location is not None
and element.location.sector == event.sector):
if any(module.isDefense() for module in element.modules):
logging.info('{0} is protected from {1}'
.format(element.name, event.name))
else:
numHits = 0
modules = element.modules[:]
random.shuffle(modules, random=rollStream.random)
for module in modules:
if (numHits < event.maxHits
and rollStream.random() < event.hitChance):
element.modules.remove(module)
numHits += 1
logging.info('{0} was hit and lost {1}'
.format(element.name, module.name))
self.trigger('hit', self, element, module)
else:
logging.debug('{0} was not hit'
.format(element.name))
self.trigger('resolve', self, event)
def logState(self):
"""
Logs the spatial state for debugging purposes.
"""
for location in self.locations:
if any(element for federation in self.federations
for federate in federation.federates
for element in federate.elements
if element.location is location):
logging.debug('{0}'.format(location.name))
for element in [element for federation in self.federations
for federate in federation.federates
for element in federate.elements
if element.location is location]:
logging.debug('-{0}'.format(element.name))
for module in element.modules:
if len(module.data) > 0:
logging.debug(' -{0}'.format(module.name))
for d in module.data:
logging.debug(' -{0} {1}'.format(d.phenomenon,
d.contract))
def autoDefault(self):
"""
Automatically defaults any invalid contracts
and liquidates bankrupt federates.
"""
for federate in [federate for federation in self.federations
for federate in federation.federates]:
# default any failed contracts
for contract in federate.contracts[:]:
if contract.isDefaulted(self.getDataLocation(contract)):
logging.warning('Auto-defaulting {0} for {1}'
.format(contract.name, federate.name))
federate.resolve(contract, self)
# liquidate bankrupt federates
if federate.getCash() < 0:
federate.liquidate(self)
def executeOperations(self):
"""
Executes operational models.
"""
logging.info('Commence operations for time {0}'.format(self.time))
federates = [federate for federation in self.federations
for federate in federation.federates]
random.shuffle(federates, random=self.orderStream.random)
for federate in federates:
federate.operations.execute(federate, self)
federations = self.federations[:]
random.shuffle(federations, random=self.orderStream.random)
for federation in federations:
federation.operations.execute(federation, self)
for federate in [federate for federation in self.federations
for federate in federation.federates]:
logging.info('{0} has {1} cash at time {2}'
.format(federate.name, federate.getCash(), self.time))
def tock(self):
"""
Tocks this context in a simulation.
"""
super(Context, self).tock()
for federation in self.federations:
federation.tock()
self.autoDefault()
self.time = self._nextTime
self.trigger('advance', self, self.time)
self.logState()
self.revealEvents()
self.resolveDisturbances()
self.executeOperations()
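# Illustrative sketch (not part of ofspy): demonstrates the sector arithmetic
# in Context.propagate() with a minimal stand-in for the Location interface
# (isOrbit(), altitude, sector). Because of the relative import above, this
# only runs when the module is executed as part of its package; the stub class
# and the six-sector layout are assumptions made for this example only.
if __name__ == '__main__':
    class _StubLocation(object):
        def __init__(self, altitude, sector):
            self.altitude = altitude
            self.sector = sector
            self.name = '{0}{1}'.format(altitude, sector + 1)
        def isOrbit(self):
            return True

    # six LEO sectors: a LEO location advances two sectors per unit duration
    leo = [_StubLocation('LEO', s) for s in range(6)]
    context = Context(locations=leo)
    print(context.propagate(leo[0], 1).name)  # LEO3: (0 + 2*1) % 6 = 2
    print(context.propagate(leo[5], 2).name)  # LEO4: (5 + 2*2) % 6 = 3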
|
|
#!/usr/bin/python
import sys
import logging
from re import search
from glob import glob
import numpy as np
import pandas as pd
from numpy import fromfile
def read_aexp_list_from_files(directory, filename_prefix="halo_catalog_a", filename_suffix=".dat") :
epochs = []
for output in glob("%s/%s*%s" % (directory, filename_prefix, filename_suffix)) :
search_string = filename_prefix+"(\d\.\d\d\d\d)"+filename_suffix
epochs.append( float(search(search_string, output).group(1)) )
return sorted(epochs)[::-1]
def read_halo_ids_from_file(filename) :
h_ids = []
with open(filename,"r") as f_input :
for line in f_input.readlines() :
cols = line.split()
h_ids.append(int(cols[0]))
return h_ids
def read_halo_catalog(directory, aexp, filename_prefix = "halo_catalog_a") :
"""
loads halos from halo_catalog_a*.dat files
"""
filename = "%s/%s%0.4f.dat" % (directory, filename_prefix, aexp)
columns = ["id", "x", "y", "z", "vx", "vy", "vz", "r_hc",
"M_hc", "num_particles", "vmax_hc", "rmax_hc"]
halo_catalog = pd.read_table(filename, sep=r"\s+",
skiprows=10, names=columns)
halo_catalog["aexp"] = aexp
return halo_catalog
def load_halo_catalog_data(sim, directory, aexp, h_inverse, mass_cut = 0.0, halo_list = []) :
halo_data = read_halo_catalog(directory, aexp)
if mass_cut :
halo_data = halo_data[halo_data["M_hc"] > mass_cut]
if halo_list :
        halo_data = halo_data[halo_data["id"].isin(halo_list)]
add_remove_h_inverse(halo_data, h_inverse, "in_halo_catalog")
comoving_physical_swap(halo_data, aexp, comoving_to_physical=True)
return halo_data
def read_halo_list(directory, aexp, radii = ["vir", "200c", "200m"],
filename_prefix = "halo_list") :
# id R_vir(Delta) [kpc/h physical] M_dark M_gas
# M_gas_cold M_star M_star_new M_baryon M_total (< R_vir(Delta))
# M_total(catalog) [M_sun/h] V_circ_max [km/s physical]
# R(V_circ_max) [kpc/h physical] gas-Z_II_ave gas-Z_Ia_ave star-Z_II_ave
# star-Z_Ia_ave star_new-Z_II_ave star_new-Z_Ia_ave [wrt solar] star-age_ave [Gyr]
halo_lists = None
for radius in radii :
columns = ["id", "r"+radius, "M_dark_"+radius, "M_gas_"+radius,
"M_gas_cold_"+radius, "M_star_"+radius, "M_star_new_"+radius,
"M_baryon_"+radius, "M_total_"+radius, "M_total_hc",
"vmax_"+radius, "rmax_"+radius,
"gas-Z_II_avg_"+radius, "gas-Z_Ia_avg_"+radius,
"star-Z_II_avg_"+radius, "star-Z_Ia_avg_"+radius,
"star_new-Z_II_avg_"+radius, "star_new-Z_Ia_avg_"+radius,
"star-age_avg_"+radius]
filename = "%s/%s_%s_a%0.4f.dat" % (directory, filename_prefix,
radius, aexp)
halo_list = pd.read_table(filename, sep=r"\s+",
skiprows=6, names=columns)
#remove extra hc mass column
halo_list = halo_list.drop('M_total_hc',1)
if halo_lists is None :
halo_lists = halo_list
else :
halo_lists = pd.merge(halo_lists, halo_list)
halo_lists["aexp"] = aexp
return halo_lists
def load_halo_list_data(sim, directory, aexp, radii, h_inverse,
mass_cut = 0.0, halo_list = []) :
halo_data = read_halo_list(directory, aexp, radii = radii)
if mass_cut :
halo_data = halo_data[halo_data["M_hc"] > mass_cut]
if halo_list :
        halo_data = halo_data[halo_data["id"].isin(halo_list)]
add_remove_h_inverse(halo_data, h_inverse, "in_profiles")
return halo_data
def load_profiles_data(sim, directory, aexp, h_inverse,
mass_cut = 0.0, halo_list = [],
profile_types = ["gas", "mass", "velocity"]) :
if mass_cut :
halo_list = sim.get_halo_ids(aexp=aexp, mass_cut=mass_cut,
halo_catalog = True)
if len(halo_list):
print "loading profiles for %d halos" % len(halo_list)
else :
return False
profiles = read_halo_profiles(directory, aexp, halo_list = halo_list,
profile_types = profile_types)
add_remove_h_inverse(profiles, h_inverse, "in_profiles")
return profiles
def read_halo_profiles(directory, aexp, filename_prefix = "halo_profile",
halo_list = [],
profile_types = ["gas", "mass", "velocity"]) :
valid_profile_types = ["gas", "mass", "velocity"]
for profile_type in profile_types :
if profile_type not in valid_profile_types :
sys.exit("profile type %s not recognized", profile_types)
columns = {}
filename = "%s/%s_%s_a%0.4f.dat" % (directory, filename_prefix,
profile_types[0], aexp)
is_enabled_clump_exclusion = check_for_clump_exclusion(filename)
columns["radius"] = ["r_in","r_mid","r_out","volume_bin", "volume"]
# gas
# gas_temperature(volume-weighted, mass-weighted) [K]
# gas_pressure(volume-weighted, mass-weighted) [ergs cm ^ {-3}]
# gas_entropy(volume-weighted, mass-weighted) [keV cm ^ 2]
columns["gas"] = ["T_vw", "T_mw", "P_vw", "P_mw", "S_vw", "S_mw"]
#mass
# dark-M_(bin, cum) gas-M_(bin, cum) gas_cold-M_(bin, cum)
# star-M_(bin, cum) star_new-M_(bin, cum) [M_sun/h]
# dark-V_circ gas-V_circ star-V_circ total-V_circ [km/s physical]
columns["mass"] = ["M_dark_bin", "M_dark", "M_gas_bin", "M_gas",
"M_gas_cold_bin", "M_gas_cold", "M_star_bin",
"M_star", "M_star_new_bin", "M_star_new",
"vcirc_dark", "vcirc_gas", "vcirc_star", "vcirc_total"]
if is_enabled_clump_exclusion :
columns["mass"] += ["M_gas_bulk_bin", "M_gas_cold_bulk_bin",
"volume_bulk", "density_threshold"]
# velocity
# dark_V_ave dark_V_rad_(ave, std) dark_V_tan(ave, std)
# gas-V_ave gas-V_rad_(ave, std) gas-V_tan_(ave, std)
# gas_cold-V_ave gas_cold-V_rad_(ave, std) gas_cold-V_tan_(ave, std)
# star-V_ave star-V_rad_(ave, std) star-V_tan_(ave, std) total-V_ave [km/s physical]
columns["velocity"] = [ "vel_dark_avg", "vel_dark_rad_avg", "vel_dark_rad_std",
"vel_dark_tan_avg", "vel_dark_tan_std",
"vel_gas_avg", "vel_gas_rad_avg", "vel_gas_rad_std",
"vel_gas_tan_avg", "vel_gas_tan_std",
"vel_gas_cold_avg", "vel_gas_cold_rad_avg",
"vel_gas_cold_rad_std",
"vel_gas_cold_tan_avg", "vel_gas_cold_tan_std",
"vel_star_avg", "vel_star_rad_avg", "vel_star_rad_std",
"vel_star_tan_avg",
"vel_star_tan_std", "vel_total_avg"]
radii_file = "%s/%s_radius_bin_a%0.4f.dat" % (directory, filename_prefix, aexp)
radii, nrows = read_radii_profile_from_ascii(radii_file, columns["radius"])
profiles = None
logging.debug(halo_list)
for profile_type in profile_types :
filename = "%s/%s_%s_a%0.4f.dat" % (directory, filename_prefix,
profile_type, aexp)
logging.debug(filename)
profile = read_profile_from_ascii(filename, columns[profile_type],
radii, halo_list = halo_list,
nrows=nrows)
if profiles is None :
profiles = profile
else :
profiles = pd.merge(profiles, profile)
profiles["aexp"] = aexp
# return profiles.set_index(["id", "bin"])
return profiles
def read_profile_from_ascii(filename, columns, radii,
halo_list = None, nrows = None) :
profiles = None
i = 0
with open(filename, 'r') as f:
#read in header
while 1 :
line = f.readline()
if line.startswith("##") :
skiprows = i+2
break
if i == 0 and not nrows:
#expects nrows to be last thing on first line
nrows = int(line.split()[-1])
i += 1
while 1:
line = f.readline()
if not line : break
if line.startswith("#") :
halo_id = int(line.split()[1])
if halo_list is None or halo_id in halo_list :
this_profile = pd.read_table(filename, sep=r"\s+", nrows=nrows,
skiprows=skiprows, names=columns)
this_profile = this_profile.join(radii)
this_profile["id"] = halo_id
if profiles is None :
profiles = this_profile
else :
profiles = pd.concat([profiles,this_profile], ignore_index=True)
skiprows += nrows+1
return profiles
def read_radii_profile_from_ascii(filename, columns) :
i = 0
with open(filename, 'r') as f:
while 1 :
line = f.readline()
if not line.startswith("#") :
skiprows = i
break
if i == 0:
#expects nrows to be last thing on first line
nrows = int(line.split()[-1])
i += 1
profile = pd.read_table(filename, sep=r"\s+", skiprows=skiprows, names=columns)
profile["bin"] = np.arange(0,nrows)
return profile, nrows
def read_halo_particles( filename, min_np = 1000, clusters = None ):
"""
loads particles (ids and binding energy) for halos
from halo_particles_a*.dat files
"""
    if clusters is not None:
cluster_hash = {}
for c in clusters:
cluster_hash[c] = 1
with open( filename, "r" ) as input :
size=fromfile( input, dtype='i', count=1 )
(aexpn,) = fromfile( input, dtype='f', count=1 )
size=fromfile( input, dtype='i', count=1 )
size=fromfile( input, dtype='i', count=1 )
if size > 4+4:
print "assuming long particle ids"
particleid_t = 'int64'
else:
particleid_t = 'i'
[nhd] = fromfile( input, dtype='i', count=1 )
[np] = fromfile( input, dtype=particleid_t, count=1 )
size=fromfile( input, dtype='i', count=1 )
print 'ae,num_halos,np', aexpn, nhd, np
cluster_data = {}
for halo in xrange(nhd):
size = fromfile( input, dtype='i', count=1 )
[ih] = fromfile( input, dtype='i', count=1 )
[inp] = fromfile( input, dtype='i', count=1 )
if ih > nhd:
print halo, ih, nhd, 'quitting'
sys.exit()
if halo % 1024 == 0:
print halo, "..."
        if (clusters is None or ih in cluster_hash) and inp > min_np:
pids = fromfile( input, dtype=particleid_t, count=inp )
bind = fromfile( input, dtype='f', count=inp )
cluster_data[ih] = zip(pids,bind)
#throw away radius
radius, = fromfile( input, dtype='i',count=1)
else:
if particleid_t == 'i':
input.seek( 4*(2*inp+1), 1 )
else:
input.seek( 8*inp+4*inp+4*1, 1 )
return cluster_data
def add_remove_h_inverse(halo_data, h_inverse, file_type) :
    if h_inverse[file_type] == h_inverse["into_db"] :
        logging.debug("h-1 status in ascii file matches database "
                      "request, no action taken")
        return
    elif h_inverse["into_db"] == True and h_inverse[file_type] == False :
        h_factor = h_inverse["h"]
    elif h_inverse["into_db"] == False and h_inverse[file_type] == True :
        h_factor = 1.0/h_inverse["h"]
    else :
        logging.error("Unrecognized combo of h_inverse_into_db "
                      "and h_inverse_in_ascii set. Set these "
                      "settings to booleans.")
        return
for column in halo_data.columns.tolist() :
if "M" in column :
halo_data[column] *= h_factor
elif column.startswith("r") :
halo_data[column] *= h_factor
elif "volume" in column :
halo_data[column] *= (h_factor**3)
def check_for_clump_exclusion(filename) :
'''
Checks for clump exclusion keywords in file header. Pass
mass profile file for best results.
'''
with open(filename, 'r') as f:
#read in header
while 1 :
line = f.readline()
if "clump density threshold" in line :
cols = line.split()
return cols[-1]
if line.startswith("##") :
return False
def comoving_physical_swap(halo_data, aexp, comoving_to_physical = True) :
if comoving_to_physical :
conversion = aexp
else :
conversion = 1.0/aexp
for column in halo_data.columns.tolist() :
if "M" in column :
halo_data[column] *= conversion
elif column.startswith("r") :
halo_data[column] *= conversion
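# Illustrative sketch (not part of the original script): exercises the unit
# bookkeeping in add_remove_h_inverse() on a tiny DataFrame. The h_inverse
# dict layout ("h", "into_db", plus a per-file flag) follows the usage above;
# the values themselves are placeholders.
if __name__ == '__main__':
    demo = pd.DataFrame({"id": [1], "M_hc": [1.0e12], "r_hc": [500.0]})
    h_inverse = {"h": 0.7, "into_db": True, "in_halo_catalog": False}
    # into_db=True with the file flag False scales mass/radius columns by h
    add_remove_h_inverse(demo, h_inverse, "in_halo_catalog")
    print demo[["M_hc", "r_hc"]]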
|
|
from taskplus.apps.rest import models
from taskplus.apps.rest.database import db_session
from taskplus.core.domain import User, UserRole
from taskplus.core.shared.repository import Repository
from taskplus.core.shared.exceptions import NoResultFound, NotUnique, DbError, CannotBeDeleted
from taskplus.core.authorization import Permission, Condition
from sqlalchemy import exc
class UsersRepository(Repository):
def __init__(self):
self.user_model = models.User
self.session = db_session
def one(self, id):
result = self.user_model.query.get(id)
if not result:
raise NoResultFound(id, User.__name__)
return self._to_domain_model(result)
def list(self, filters=None):
if not filters:
result = self.user_model.query.all()
else:
filters = self._parse_filters(filters)
filters_expression = []
for filter in filters:
key = getattr(self.user_model, filter.key)
filters_expression.append(
getattr(key, filter.operator)(filter.value))
result = self.user_model.query.filter(*filters_expression).all()
users = [self._to_domain_model(user) for user in result]
return users
def save(self, user, password):
roles = models.UserRole.query.filter(
models.UserRole.id.in_([role.id for role in user.roles])).all()
if not roles:
raise NoResultFound([role.id for role in user.roles], 'UserRole')
try:
new_user = self.user_model(name=user.name, roles=roles, password=password)
self.session.add(new_user)
self.session.commit()
except exc.IntegrityError as e:
self.session.rollback()
if 'unique' in str(e).lower():
                raise NotUnique('User already exists')
raise
except exc.SQLAlchemyError:
self.session.rollback()
raise DbError()
return self._to_domain_model(new_user)
def update(self, user):
user_to_update = self.user_model.query.get(user.id)
if not user_to_update:
raise NoResultFound(user.id, User.__name__)
roles = models.UserRole.query.filter(
models.UserRole.id.in_([role.id for role in user.roles])).all()
if not roles:
raise NoResultFound([role.id for role in user.roles], 'UserRole')
try:
user_to_update.name = user.name
user_to_update.roles = roles
self.session.add(user_to_update)
self.session.commit()
except exc.IntegrityError as e:
self.session.rollback()
if 'unique' in str(e).lower():
                raise NotUnique('User already exists')
raise
except exc.SQLAlchemyError:
self.session.rollback()
raise DbError()
return self._to_domain_model(user_to_update)
def delete(self, id):
user = self.user_model.query.get(id)
if not user:
raise NoResultFound(id, User.__name__)
rv = self._to_domain_model(user)
try:
self.session.delete(user)
self.session.commit()
except exc.IntegrityError as e:
self.session.rollback()
if 'foreign' in str(e).lower():
raise CannotBeDeleted('Cannot delete user')
raise
except exc.SQLAlchemyError:
self.session.rollback()
raise DbError()
return rv
def _to_domain_model(self, data):
permissions = []
for role in data.roles:
if role.name == 'creator':
permissions.append(Permission('ListTasksAction'))
permissions.append(Permission('ListTaskStatusesAction'))
permissions.append(Permission('ListUserRolesAction'))
permissions.append(Permission('AddTaskAction'))
permissions.append(Permission('CancelTaskAction', conditions=[
Condition('task.doer', 'eq', 'None')
]))
permissions.append(Permission('GetTaskDetailsAction'))
permissions.append(Permission('GetRoleDetailsAction'))
permissions.append(Permission('GetTaskStatusDetailsAction'))
permissions.append(Permission('GetUserDetailsAction', conditions=[
Condition('request.user_id', 'eq', 'user.id')
]))
permissions.append(Permission('GetNotCompletedTasksAction'))
permissions.append(
Permission('UpdateTaskAction', conditions=[
Condition('task.creator.id', 'eq', 'user.id')
]))
if role.name == 'doer':
permissions.append(
Permission('CancelTaskAction', conditions=[
Condition('task.doer.id', 'eq', 'user.id')
]))
permissions.append(
Permission('CompleteTaskAction', conditions=[
Condition('task.doer.id', 'eq', 'user.id')
]))
permissions.append(
Permission('AssignUserToTaskAction', conditions=[
Condition('request.user_id', 'eq', 'user.id'),
Condition('task.doer', 'eq', 'None')
]))
permissions.append(
Permission('UnassignUserFromTaskAction', conditions=[
Condition('task.doer.id', 'eq', 'user.id')
]))
permissions.append(Permission('GetUserDetailsAction', conditions=[
Condition('request.user_id', 'eq', 'user.id')
]))
permissions.append(Permission('GetTaskDetailsAction'))
permissions.append(Permission('GetRoleDetailsAction'))
permissions.append(Permission('GetTaskStatusDetailsAction'))
permissions.append(Permission('ListTasksAction'))
permissions.append(Permission('ListTaskStatusesAction'))
permissions.append(Permission('ListUserRolesAction'))
permissions.append(Permission('GetNotCompletedTasksAction'))
if role.name == 'admin':
permissions.append(Permission('AddUserRoleAction'))
permissions.append(Permission('DeleteUserRoleAction'))
permissions.append(Permission('ListUserRolesAction'))
permissions.append(Permission('UpdateUserRoleAction'))
permissions.append(Permission('AddUserAction'))
permissions.append(Permission('ListUsersAction'))
permissions.append(Permission('UpdateUserAction'))
permissions.append(Permission('DeleteUserAction'))
permissions.append(Permission('GetTaskDetailsAction'))
permissions.append(Permission('GetRoleDetailsAction'))
permissions.append(Permission('GetTaskStatusDetailsAction'))
permissions.append(Permission('GetUserDetailsAction'))
permissions.append(Permission('ListTasksAction'))
permissions.append(Permission('ListTaskStatusesAction'))
permissions.append(Permission('AddTaskStatusAction'))
permissions.append(Permission('DeleteTaskStatusAction'))
permissions.append(Permission('UpdateTaskStatusAction'))
permissions.append(Permission('GetNotCompletedTasksAction'))
permissions.append(Permission('UpdateTaskAction'))
return User(
id=data.id,
name=data.name,
roles=[UserRole(id=role.id, name=role.name)
for role in data.roles],
permissions=permissions
)
def check_password(self, user, password):
result = self.user_model.query.get(user.id)
if not result:
raise NoResultFound(user.id, User.__name__)
return result.check_password(password)
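# A minimal usage sketch, assuming this repository was constructed elsewhere with
# a working SQLAlchemy session and user model; `repo`, `user` and the id values
# are hypothetical.
#
#     if repo.check_password(user, 'secret'):      # `user` is a domain User
#         removed = repo.delete(user.id)           # returns the removed domain User;
#                                                  # raises CannotBeDeleted if the user
#                                                  # is still referenced by other rows
#
# Note that _to_domain_model derives permissions purely from role names
# ('creator', 'doer', 'admin'), so a user with several roles accumulates the
# union of the corresponding permission lists.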
|
|
import os.path as op
from functools import wraps
from flask import Blueprint, current_app, render_template, abort, g, url_for
from flask_admin import babel
from flask_admin._compat import with_metaclass
from flask_admin import helpers as h
# For compatibility reasons import MenuLink
from flask_admin.menu import MenuCategory, MenuView, MenuLink
def expose(url='/', methods=('GET',)):
"""
Use this decorator to expose views in your view classes.
:param url:
Relative URL for the view
:param methods:
Allowed HTTP methods. By default only GET is allowed.
"""
def wrap(f):
if not hasattr(f, '_urls'):
f._urls = []
f._urls.append((url, methods))
return f
return wrap
def expose_plugview(url='/'):
"""
Decorator to expose Flask's pluggable view classes
(``flask.views.View`` or ``flask.views.MethodView``).
:param url:
Relative URL for the view
.. versionadded:: 1.0.4
"""
def wrap(v):
handler = expose(url, v.methods)
if hasattr(v, 'as_view'):
return handler(v.as_view(v.__name__))
else:
return handler(v)
return wrap
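# A minimal sketch of how the two decorators above are combined with a BaseView
# subclass (defined further below). Assumes flask.views.MethodView is importable;
# the class, endpoint and template names are hypothetical.
#
#     from flask.views import MethodView
#
#     class ReportsView(BaseView):
#         @expose('/')                      # served at <admin url>/reportsview/
#         def index(self):
#             return self.render('admin/reports.html')
#
#         @expose_plugview('/api')          # delegates to a pluggable view; the
#         class API(MethodView):            # admin view instance is passed as `cls`
#             def get(self, cls):
#                 return 'report data'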
# Base views
def _wrap_view(f):
# Avoid wrapping view method twice
if hasattr(f, '_wrapped'):
return f
@wraps(f)
def inner(self, *args, **kwargs):
# Store current admin view
h.set_current_view(self)
# Check if administrative piece is accessible
abort = self._handle_view(f.__name__, **kwargs)
if abort is not None:
return abort
return self._run_view(f, *args, **kwargs)
inner._wrapped = True
return inner
class AdminViewMeta(type):
"""
View metaclass.
Does some pre-calculations (like getting the list of view methods from the class) to avoid
calculating them for each view class instance.
"""
def __init__(cls, classname, bases, fields):
type.__init__(cls, classname, bases, fields)
# Gather exposed views
cls._urls = []
cls._default_view = None
for p in dir(cls):
attr = getattr(cls, p)
if hasattr(attr, '_urls'):
# Collect methods
for url, methods in attr._urls:
cls._urls.append((url, p, methods))
if url == '/':
cls._default_view = p
# Wrap views
setattr(cls, p, _wrap_view(attr))
class BaseViewClass(object):
pass
class BaseView(with_metaclass(AdminViewMeta, BaseViewClass)):
"""
Base administrative view.
Derive from this class to implement your administrative interface piece. For example::
from flask_admin import BaseView, expose
class MyView(BaseView):
@expose('/')
def index(self):
return 'Hello World!'
Icons can be added to the menu by using `menu_icon_type` and `menu_icon_value`. For example::
admin.add_view(MyView(name='My View', menu_icon_type='glyph', menu_icon_value='glyphicon-home'))
"""
@property
def _template_args(self):
"""
Extra template arguments.
If you need to pass some extra parameters to the template,
you can override a particular view function, contribute the
arguments you want to pass to the template, and call the parent view.
These arguments are local for this request and will be discarded
in the next request.
Any value passed through ``_template_args`` will override whatever
parent view function passed to the template.
For example::
class MyAdmin(ModelView):
@expose('/')
def index(self):
self._template_args['name'] = 'foobar'
self._template_args['code'] = '12345'
return super(MyAdmin, self).index()
"""
args = getattr(g, '_admin_template_args', None)
if args is None:
args = g._admin_template_args = dict()
return args
def __init__(self, name=None, category=None, endpoint=None, url=None,
static_folder=None, static_url_path=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param name:
Name of this view. If not provided, will default to the class name.
:param category:
View category. If not provided, this view will be shown as a top-level menu item. Otherwise, it will
be in a submenu.
:param endpoint:
Base endpoint name for the view. For example, if there's a view method called "index" and
endpoint is set to "myadmin", you can use `url_for('myadmin.index')` to get the URL to the
view method. Defaults to the class name in lower case.
:param url:
Base URL. If provided, affects how URLs are generated. For example, if the url parameter
is "test", the resulting URL will look like "/admin/test/". If not provided, will
use endpoint as a base url. However, if URL starts with '/', absolute path is assumed
and '/admin/' prefix won't be applied.
:param static_url_path:
Static URL Path. If provided, this specifies the path to the static url directory.
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self.name = name
self.category = category
self.endpoint = self._get_endpoint(endpoint)
self.url = url
self.static_folder = static_folder
self.static_url_path = static_url_path
self.menu = None
self.menu_class_name = menu_class_name
self.menu_icon_type = menu_icon_type
self.menu_icon_value = menu_icon_value
# Initialized from create_blueprint
self.admin = None
self.blueprint = None
# Default view
if self._default_view is None:
raise Exception(u'Attempted to instantiate admin view %s without default view' % self.__class__.__name__)
def _get_endpoint(self, endpoint):
"""
Generate Flask endpoint name. By default converts class name to lower case if endpoint is
not explicitly provided.
"""
if endpoint:
return endpoint
return self.__class__.__name__.lower()
def create_blueprint(self, admin):
"""
Create Flask blueprint.
"""
# Store admin instance
self.admin = admin
# If the static_url_path is not provided, use the admin's
if not self.static_url_path:
self.static_url_path = admin.static_url_path
# If url is not provided, generate it from endpoint name
if self.url is None:
if self.admin.url != '/':
self.url = '%s/%s' % (self.admin.url, self.endpoint)
else:
if self == admin.index_view:
self.url = '/'
else:
self.url = '/%s' % self.endpoint
else:
if not self.url.startswith('/'):
self.url = '%s/%s' % (self.admin.url, self.url)
# If we're working from the root of the site, set prefix to None
if self.url == '/':
self.url = None
# prevent admin static files from conflicting with flask static files
if not self.static_url_path:
self.static_folder = 'static'
self.static_url_path = '/static/admin'
# If name is not provided, use capitalized endpoint name
if self.name is None:
self.name = self._prettify_class_name(self.__class__.__name__)
# Create blueprint and register rules
self.blueprint = Blueprint(self.endpoint, __name__,
url_prefix=self.url,
subdomain=self.admin.subdomain,
template_folder=op.join('templates', self.admin.template_mode),
static_folder=self.static_folder,
static_url_path=self.static_url_path)
for url, name, methods in self._urls:
self.blueprint.add_url_rule(url,
name,
getattr(self, name),
methods=methods)
return self.blueprint
def render(self, template, **kwargs):
"""
Render template
:param template:
Template path to render
:param kwargs:
Template arguments
"""
# Store self as admin_view
kwargs['admin_view'] = self
kwargs['admin_base_template'] = self.admin.base_template
# Provide i18n support even if flask-babel is not installed
# or enabled.
kwargs['_gettext'] = babel.gettext
kwargs['_ngettext'] = babel.ngettext
kwargs['h'] = h
# Expose get_url helper
kwargs['get_url'] = self.get_url
# Expose config info
kwargs['config'] = current_app.config
# Contribute extra arguments
kwargs.update(self._template_args)
return render_template(template, **kwargs)
def _prettify_class_name(self, name):
"""
Split words in PascalCase string into separate words.
:param name:
String to prettify
"""
return h.prettify_class_name(name)
def is_visible(self):
"""
Override this method if you want to dynamically hide or show administrative views
from the Flask-Admin menu structure.
By default, an item is visible in the menu.
Please note that an item should be both visible and accessible to be displayed in the menu.
"""
return True
def is_accessible(self):
"""
Override this method to add permission checks.
Flask-Admin does not make any assumptions about the authentication system used in your application, so it is
up to you to implement it.
By default, it will allow access for everyone.
"""
return True
def _handle_view(self, name, **kwargs):
"""
This method will be executed before calling any view method.
It will execute the ``inaccessible_callback`` if the view is not
accessible.
:param name:
View function name
:param kwargs:
View function arguments
"""
if not self.is_accessible():
return self.inaccessible_callback(name, **kwargs)
def _run_view(self, fn, *args, **kwargs):
"""
This method will run the actual view function.
While it is similar to _handle_view, it can be used to change
the arguments that are passed to the view.
:param fn:
View function
:param kwargs:
Arguments
"""
return fn(self, *args, **kwargs)
def inaccessible_callback(self, name, **kwargs):
"""
Handle the response to inaccessible views.
By default, it throws an HTTP 403 error. Override this method to
customize the behaviour.
"""
return abort(403)
def get_url(self, endpoint, **kwargs):
"""
Generate URL for the endpoint. If you want to customize URL generation
logic (persist some query string argument, for example), this is the
right place to do it.
:param endpoint:
Flask endpoint name
:param kwargs:
Arguments for `url_for`
"""
return url_for(endpoint, **kwargs)
@property
def _debug(self):
if not self.admin or not self.admin.app:
return False
return self.admin.app.debug
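# A minimal sketch of a BaseView subclass that restricts access by overriding
# is_accessible() and inaccessible_callback(). flask_login and the 'login'
# endpoint are assumptions, not part of this module.
#
#     from flask import redirect, url_for, request
#     from flask_login import current_user
#
#     class SecureView(BaseView):
#         @expose('/')
#         def index(self):
#             return self.render('admin/secure.html')
#
#         def is_accessible(self):
#             return current_user.is_authenticated()
#
#         def inaccessible_callback(self, name, **kwargs):
#             # redirect to a login page instead of the default 403
#             return redirect(url_for('login', next=request.url))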
class AdminIndexView(BaseView):
"""
Default administrative interface index page when visiting the ``/admin/`` URL.
It can be overridden by passing your own view class to the ``Admin`` constructor::
class MyHomeView(AdminIndexView):
@expose('/')
def index(self):
arg1 = 'Hello'
return self.render('admin/myhome.html', arg1=arg1)
admin = Admin(index_view=MyHomeView())
Also, you can change the root url from /admin to / with the following::
admin = Admin(
app,
index_view=AdminIndexView(
name='Home',
template='admin/myhome.html',
url='/'
)
)
Default values for the index page are:
* If a name is not provided, 'Home' will be used.
* If an endpoint is not provided, will default to ``admin``
* Default URL route is ``/admin``.
* Automatically associates with static folder.
* Default template is ``admin/index.html``
"""
def __init__(self, name=None, category=None,
endpoint=None, url=None,
template='admin/index.html',
menu_class_name=None,
menu_icon_type=None,
menu_icon_value=None):
super(AdminIndexView, self).__init__(name or babel.lazy_gettext('Home'),
category,
endpoint or 'admin',
url or '/admin',
'static',
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._template = template
@expose()
def index(self):
return self.render(self._template)
class Admin(object):
"""
Collection of the admin views. Also manages menu structure.
"""
def __init__(self, app=None, name=None,
url=None, subdomain=None,
index_view=None,
translations_path=None,
endpoint=None,
static_url_path=None,
base_template=None,
template_mode=None):
"""
Constructor.
:param app:
Flask application object
:param name:
Application name. Will be displayed in the main menu and as a page title. Defaults to "Admin"
:param url:
Base URL
:param subdomain:
Subdomain to use
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param translations_path:
Location of the translation message catalogs. By default will use the translations
shipped with Flask-Admin.
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
:param static_url_path:
Static URL Path. If provided, this specifies the default path to the static url directory for
all its views. Can be overridden in view configuration.
:param base_template:
Override base HTML template for all static views. Defaults to `admin/base.html`.
:param template_mode:
Base template path. Defaults to `bootstrap2`. If you want to use
Bootstrap 3 integration, change it to `bootstrap3`.
"""
self.app = app
self.translations_path = translations_path
self._views = []
self._menu = []
self._menu_categories = dict()
self._menu_links = []
if name is None:
name = 'Admin'
self.name = name
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
self.static_url_path = static_url_path
self.subdomain = subdomain
self.base_template = base_template or 'admin/base.html'
self.template_mode = template_mode or 'bootstrap2'
# Add predefined index view
self.add_view(self.index_view)
# Register with application
if app is not None:
self._init_extension()
def add_view(self, view):
"""
Add a view to the collection.
:param view:
View to add.
"""
# Add to views
self._views.append(view)
# If app was provided in constructor, register view with Flask app
if self.app is not None:
self.app.register_blueprint(view.create_blueprint(self))
self._add_view_to_menu(view)
def add_link(self, link):
"""
Add link to menu links collection.
:param link:
Link to add.
"""
if link.category:
self._add_menu_item(link, link.category)
else:
self._menu_links.append(link)
def _add_menu_item(self, menu_item, target_category):
if target_category:
category = self._menu_categories.get(target_category)
if category is None:
category = MenuCategory(target_category)
self._menu_categories[target_category] = category
self._menu.append(category)
category.add_child(menu_item)
else:
self._menu.append(menu_item)
def _add_view_to_menu(self, view):
"""
Add a view to the menu tree
:param view:
View to add
"""
self._add_menu_item(MenuView(view.name, view), view.category)
def get_category_menu_item(self, name):
return self._menu_categories.get(name)
def init_app(self, app):
"""
Register all views with the Flask application.
:param app:
Flask application instance
"""
self.app = app
self._init_extension()
# Register views
for view in self._views:
app.register_blueprint(view.create_blueprint(self))
def _init_extension(self):
if not hasattr(self.app, 'extensions'):
self.app.extensions = dict()
admins = self.app.extensions.get('admin', [])
for p in admins:
if p.endpoint == self.endpoint:
raise Exception(u'Cannot have two Admin() instances with same'
u' endpoint name.')
if p.url == self.url and p.subdomain == self.subdomain:
raise Exception(u'Cannot assign two Admin() instances with same'
u' URL and subdomain to the same application.')
admins.append(self)
self.app.extensions['admin'] = admins
def menu(self):
"""
Return the menu hierarchy.
"""
return self._menu
def menu_links(self):
"""
Return menu links.
"""
return self._menu_links
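# A minimal wiring sketch, assuming a Flask `app` and the view classes exist
# elsewhere; names are hypothetical.
#
#     admin = Admin(app, name='My App', template_mode='bootstrap2')
#     admin.add_view(SecureView(name='Secure', category='Internal'))
#     admin.add_link(MenuLink(name='Back to site', url='/'))
#
#     # ... or, with the application factory pattern, create the Admin object
#     # without an app and register it later:
#     admin = Admin(name='My App')
#     admin.init_app(app)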
|
|
import difflib
import json
import os
import re
import sys
from copy import copy
from functools import wraps
from urlparse import urlsplit, urlunsplit
from xml.dom.minidom import parseString, Node
import select
import socket
import threading
import errno
from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core import mail
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.handlers.wsgi import WSGIHandler
from django.core.management import call_command
from django.core.signals import request_started
from django.core.servers.basehttp import (WSGIRequestHandler, WSGIServer,
WSGIServerException)
from django.core.urlresolvers import clear_url_caches
from django.core.validators import EMPTY_VALUES
from django.db import (transaction, connection, connections, DEFAULT_DB_ALIAS,
reset_queries)
from django.forms.fields import CharField
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (get_warnings_state, restore_warnings_state,
override_settings)
from django.test.utils import ContextList
from django.utils import unittest as ut2
from django.utils.encoding import smart_str, force_unicode
from django.utils.unittest.util import safe_repr
from django.views.static import serve
__all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
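# Quick illustration of the two normalizers above (outputs shown are what the
# regular expressions produce):
#     normalize_long_ints("[1L, 2L, 30]")    -> "[1, 2, 30]"
#     normalize_decimals("Decimal('5.00')")  -> 'Decimal("5.00")'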
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_managed = transaction.managed
def nop(*args, **kwargs):
return
def disable_transaction_methods():
transaction.commit = nop
transaction.rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
transaction.managed = nop
def restore_transaction_methods():
transaction.commit = real_commit
transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
transaction.managed = real_managed
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = u'%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
"""
The entry method for doctest output checking. Defers to a sequence of
child checkers
"""
checks = (self.check_output_default,
self.check_output_numeric,
self.check_output_xml,
self.check_output_json)
for check in checks:
if check(want, got, optionflags):
return True
return False
def check_output_default(self, want, got, optionflags):
"""
The default comparator provided by doctest - not perfect, but good for
most purposes
"""
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def check_output_numeric(self, want, got, optionflags):
"""Doctest does an exact string comparison of output, which means that
some numerically equivalent values aren't equal. This check normalizes
* long integers (22L) so that they equal normal integers (22).
* Decimals so that they are comparable, regardless of the change
made to __repr__ in Python 2.6.
"""
return doctest.OutputChecker.check_output(self,
normalize_decimals(normalize_long_ints(want)),
normalize_decimals(normalize_long_ints(got)),
optionflags)
def check_output_xml(self, want, got, optionsflags):
"""Tries to do a 'xml-comparision' of want and got. Plain string
comparision doesn't always work because, for example, attribute
ordering should not be important.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join([c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE])
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
want, got = self._strip_quotes(want, got)
want = want.replace('\\n','\n')
got = got.replace('\\n','\n')
# If the string is not a complete xml document, we may need to add a
# root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
try:
want_root = parseString(want).firstChild
got_root = parseString(got).firstChild
except Exception:
return False
return check_element(want_root, got_root)
def check_output_json(self, want, got, optionsflags):
"""
Tries to compare want and got as if they were JSON-encoded data
"""
want, got = self._strip_quotes(want, got)
try:
want_json = json.loads(want)
got_json = json.loads(got)
except Exception:
return False
return want_json == got_json
def _strip_quotes(self, want, got):
"""
Strip quotes of doctests output values:
>>> o = OutputChecker()
>>> o._strip_quotes("'foo'")
"foo"
>>> o._strip_quotes('"foo"')
"foo"
>>> o._strip_quotes("u'foo'")
"foo"
>>> o._strip_quotes('u"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
class DocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS
def report_unexpected_exception(self, out, test, example, exc_info):
doctest.DocTestRunner.report_unexpected_exception(self, out, test,
example, exc_info)
# Rollback, in case of database errors. Otherwise they'd have
# side effects on other tests.
for conn in connections:
transaction.rollback_unless_managed(using=conn)
class _AssertNumQueriesContext(object):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
self.connection = connection
def __enter__(self):
self.old_debug_cursor = self.connection.use_debug_cursor
self.connection.use_debug_cursor = True
self.starting_queries = len(self.connection.queries)
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.use_debug_cursor = self.old_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
final_queries = len(self.connection.queries)
executed = final_queries - self.starting_queries
self.test_case.assertEqual(
executed, self.num, "%d queries executed, %d expected" % (
executed, self.num
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return u'%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += u' No template was rendered.'
else:
message += u' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return u'%s was rendered.' % self.template_name
class SimpleTestCase(ut2.TestCase):
def save_warnings_state(self):
"""
Saves the state of the warnings module
"""
self._warnings_state = get_warnings_state()
def restore_warnings_state(self):
"""
Restores the state of the warnings module to the state
saved by save_warnings_state()
"""
restore_warnings_state(self._warnings_state)
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts
back to the original value when exiting the context.
"""
return override_settings(**kwargs)
def assertRaisesMessage(self, expected_exception, expected_message,
callable_obj=None, *args, **kwargs):
"""
Asserts that the message in a raised exception matches the passed
value.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message), callable_obj, *args, **kwargs)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=u''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in EMPTY_VALUES
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args,
**dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_unicode(required.error_messages['required'])]
for e in EMPTY_VALUES:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages,
error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length':2, 'max_length':20})
self.assertTrue(isinstance(fieldclass(*field_args, **field_kwargs),
fieldclass))
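# A minimal usage sketch for assertFieldOutput, assuming EmailField is imported
# from django.forms; the exact error message depends on the Django version and
# locale.
#
#     self.assertFieldOutput(
#         EmailField,
#         valid={u'[email protected]': u'[email protected]'},
#         invalid={u'not-an-email': [u'Enter a valid e-mail address.']},
#     )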
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg,
u'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
u'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
unicode(dom1).splitlines(),
unicode(dom2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
u'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
u'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Flushing the database.
* If the Test Case class has a 'fixtures' member, installing the
named fixtures.
* If the Test Case class has a 'urls' member, replace the
ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self._fixture_setup()
self._urlconf_setup()
mail.outbox = []
def _fixture_setup(self):
# If the test case has a multi_db=True flag, flush all databases.
# Otherwise, just flush default.
if getattr(self, 'multi_db', False):
databases = connections
else:
databases = [DEFAULT_DB_ALIAS]
for db in databases:
call_command('flush', verbosity=0, interactive=False, database=db,
skip_validation=True)
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db, 'skip_validation': True})
def _urlconf_setup(self):
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False))
if not skipped:
self.client = self.client_class()
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
super(TransactionTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
def _post_teardown(self):
""" Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
* Force closing the connection, so that the next test gets
a clean cursor.
"""
self._fixture_teardown()
self._urlconf_teardown()
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does rollback, the effect
# of these statements is lost, which can affect the operation
# of tests (e.g., losing a timezone setting causing objects to
# be created with the wrong time).
# To make sure this doesn't happen, get a clean connection at the
# start of every test.
for conn in connections.all():
conn.close()
def _fixture_teardown(self):
pass
def _urlconf_teardown(self):
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix=''):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request.
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
redirect_response = response.client.get(path, QueryDict(query))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(
expected_url)
if not (e_scheme or e_netloc):
expected_url = urlunsplit(('http', host or 'testserver', e_path,
e_query, e_fragment))
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
content = response.content
if html:
content = assert_and_parse_html(self, content, None,
u"Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None,
u"Second argument is not valid HTML:")
real_count = content.count(text)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, text, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % text)
def assertNotContains(self, response, text, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` does not occur in the content of the response.
"""
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
content = response.content
if html:
content = assert_and_parse_html(self, content, None,
u'Response\'s content is not valid HTML:')
text = assert_and_parse_html(self, text, None,
u'Second argument is not valid HTML:')
self.assertEqual(content.count(text), 0,
msg_prefix + "Response should not contain '%s'" % text)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i,context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError(u'response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, u', '.join(template_names)))
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError(u'response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateNotUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True):
if not ordered:
return self.assertEqual(set(map(transform, qs)), set(values))
return self.assertEqual(map(transform, qs), values)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
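# A minimal sketch of assertNumQueries used as a context manager and with a
# callable; `Article` is a hypothetical model.
#
#     with self.assertNumQueries(1):
#         list(Article.objects.all())
#
#     self.assertNumQueries(2, lambda: (Article.objects.create(title='a'),
#                                       Article.objects.count()))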
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase, but surrounds every test
with a transaction, monkey-patches the real transaction management routines
to do nothing, and rolls back the test transaction at the end of the test.
You have to use TransactionTestCase if you need transaction management
inside a test.
"""
def _fixture_setup(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_setup()
# If the test case has a multi_db=True flag, setup all databases.
# Otherwise, just use default.
if getattr(self, 'multi_db', False):
databases = connections
else:
databases = [DEFAULT_DB_ALIAS]
for db in databases:
transaction.enter_transaction_management(using=db)
transaction.managed(True, using=db)
disable_transaction_methods()
from django.contrib.sites.models import Site
Site.objects.clear_cache()
for db in databases:
if hasattr(self, 'fixtures'):
call_command('loaddata', *self.fixtures,
**{
'verbosity': 0,
'commit': False,
'database': db,
'skip_validation': True,
})
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
# If the test case has a multi_db=True flag, teardown all databases.
# Otherwise, just teardown default.
if getattr(self, 'multi_db', False):
databases = connections
else:
databases = [DEFAULT_DB_ALIAS]
restore_transaction_methods()
for db in databases:
transaction.rollback(using=db)
transaction.leave_transaction_management(using=db)
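# A minimal sketch of the practical difference between the two base classes;
# the fixture, model and test names are hypothetical.
#
#     class FastTests(TestCase):                 # each test runs in a transaction
#         fixtures = ['articles.json']           # that is rolled back afterwards
#
#         def test_count(self):
#             self.assertEqual(Article.objects.count(), 3)
#
#     class CommitTests(TransactionTestCase):    # needed when the code under test
#         def test_explicit_commit(self):        # manages transactions itself
#             with transaction.commit_on_success():
#                 Article.objects.create(title='x')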
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise ut2.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
test_item = test_func
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(feature):
"""
Skip a test if a database has the named feature
"""
return _deferredSkip(lambda: getattr(connection.features, feature),
"Database has feature %s" % feature)
def skipUnlessDBFeature(feature):
"""
Skip a test unless a database has the named feature
"""
return _deferredSkip(lambda: not getattr(connection.features, feature),
"Database doesn't support feature %s" % feature)
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
Just a regular WSGIRequestHandler except it doesn't log to the standard
output any of the requests received, so as to not clutter the output for
the tests' results.
"""
def log_message(*args):
pass
class _ImprovedEvent(threading._Event):
"""
Does the same as `threading.Event` except it overrides the wait() method
with some code borrowed from Python 2.7 to return the set state of the
event (see: http://hg.python.org/cpython/rev/b5aa8aa78c0f/). This makes it
possible to know whether the wait() method exited normally or because of the
timeout. This class can be removed when Django supports only Python >= 2.7.
"""
def wait(self, timeout=None):
self._Event__cond.acquire()
try:
if not self._Event__flag:
self._Event__cond.wait(timeout)
return self._Event__flag
finally:
self._Event__cond.release()
class StoppableWSGIServer(WSGIServer):
"""
The code in this class is borrowed from the `SocketServer.BaseServer` class
in Python 2.6. The important functionality here is that the server is non-
blocking and that it can be shut down at any moment. This is made possible
by the server regularly polling the socket and checking if it has been
asked to stop.
Note for the future: Once Django stops supporting Python 2.6, this class
can be removed as `WSGIServer` will have this ability to shutdown on
demand and will not require the use of the _ImprovedEvent class whose code
is borrowed from Python 2.7.
"""
def __init__(self, *args, **kwargs):
super(StoppableWSGIServer, self).__init__(*args, **kwargs)
self.__is_shut_down = _ImprovedEvent()
self.__serving = False
def serve_forever(self, poll_interval=0.5):
"""
Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds.
"""
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
r, w, e = select.select([self], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
"""
Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self.__serving = False
if not self.__is_shut_down.wait(2):
raise RuntimeError(
"Failed to shutdown the live test server in 2 seconds. The "
"server might be stuck or generating a slow response.")
def handle_request(self):
"""Handle one request, possibly blocking.
"""
fd_sets = select.select([self], [], [], None)
if not fd_sets[0]:
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""
Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except Exception:
self.handle_error(request, client_address)
self.close_request(request)
class _MediaFilesHandler(StaticFilesHandler):
"""
Handler for serving the media files. This is a private class that is
meant to be used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
def serve(self, request):
relative_url = request.path[len(self.base_url[2]):]
return serve(request, relative_url, document_root=self.get_base_dir())
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, possible_ports, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
from django.db import connections
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = StaticFilesHandler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = StoppableWSGIServer(
(self.host, port), QuietWSGIRequestHandler)
except WSGIServerException as e:
if (index + 1 < len(self.possible_ports) and
e.args[0].errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
# Either none of the given ports are free or the error
# is something else than "Address already in use". So
# we let that error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def join(self, timeout=None):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
super(LiveServerThread, self).join(timeout)
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
http server in a separate thread so that the tests may use another testing
framework, such as Selenium, instead of the built-in dummy
client.
Note that it inherits from TransactionTestCase instead of TestCase because
the threads do not share the same transactions (unless using in-memory
sqlite) and each thread needs to commit all its transactions so that the
other thread can see the changes.
"""
@property
def live_server_url(self):
return 'http://%s:%s' % (
self.server_thread.host, self.server_thread.port)
@classmethod
def setUpClass(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if (conn.settings_dict['ENGINE'] == 'django.db.backends.sqlite3'
and conn.settings_dict['NAME'] == ':memory:'):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = map(int, port_range.split('-'))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
raise ImproperlyConfigured('Invalid address ("%s") for live '
'server.' % specified_address)
cls.server_thread = LiveServerThread(
host, possible_ports, connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
super(LiveServerTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# There may not be a 'server_thread' attribute if setUpClass() for some
# reason has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.join()
super(LiveServerTestCase, cls).tearDownClass()
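# A minimal sketch of a live-server test hitting self.live_server_url with
# urllib2; a Selenium driver could be used the same way. The URL path is
# hypothetical.
#
#     import urllib2
#
#     class HomePageLiveTest(LiveServerTestCase):
#         def test_home_page(self):
#             response = urllib2.urlopen(self.live_server_url + '/')
#             self.assertEqual(response.code, 200)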
|
|
import sys, os, json, ast, requests
from time import time as tm, sleep as slp
from datetime import datetime as dt
from itertools import cycle
from pyquery import PyQuery as pq_
from browse import browse_url_profile_details
from user_agents import user_agents
data_dir = 'profile_data/'
class indeed_resumes_details(object):
def __init__(self, unique_id):
self.user_agents_cycle = cycle(user_agents)
self.unique_id = unique_id
self.profile_identities = {
'workExperience': {'list_key': 'work_experiences',
'content': '.workExperience-content .items-container',
'item_w_id': '.workExperience-content .items-container #%s',
'items': [('.work_title', None), ('.work_dates', None), ('.work_description', None), ('.work_company', {'.work_company': 0, '.work_location': -1})]
},
'education': {'list_key': 'education_bgs',
'content': '.education-content .items-container',
'item_w_id': '.education-content .items-container #%s',
'items': [('.edu_title', None), ('.edu_school', None), ('.edu_dates', None)]
},
}
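# Note on the mapping above: for each section, 'content' selects the items
# container, 'item_w_id' re-selects a single item by its DOM id, and every
# entry in 'items' is a (css_selector, splits) pair. When `splits` is a dict,
# extract_details splits the selected text on '-' and picks the listed indices,
# e.g. ('.work_company', {'.work_company': 0, '.work_location': -1}) turns
# "Acme Corp - Austin, TX" (a hypothetical value) into work_company = 'Acme Corp '
# and work_location = ' Austin, TX' (whitespace is not stripped).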
def resource_collection(self):
url_ = browse_url_profile_details % self.unique_id
data = self.get_resource(url_)
details = self.extract_details(data)
return details
def extract_details(self, data):
t1 = tm()
details = {}
if not data:
return details
details['name'] = data('#basic_info_row #basic_info_cell #resume-contact').text()
details['title'] = data('#basic_info_row #basic_info_cell #headline').text()
details['address'] = data('#basic_info_row #basic_info_cell #contact_info_container .adr #headline_location').text()
details['skills'] = data('.skills-content #skills-items .data_display .skill-container').text().split(',')
details['additional_info'] = data('.additionalInfo-content #additionalinfo-items .data_display').text().encode('ascii','ignore')
identities = {}
for k, v in self.profile_identities.iteritems():
identities[k] = {'data': []}
for item in data(v['content']).children():
data_= {}
it = pq_(item)
if it.attr('id').startswith(k):
it_id = it.attr('id')
item = data(v['item_w_id'] % it_id)
children = pq_(item.children())
for each, splits in v['items']:
if splits:
item_construct = children(each).text().split('-')
for sub, index in splits.iteritems():
data_[sub] = item_construct[index]
else:
data_[each] = children(each).text().encode('ascii','ignore')
identities[k]['data'].append(data_)
details[k] = identities[k]
t2 = tm()
details['time_taken'] = t2-t1
details['timestamp'] = tm()
return details
def get_resource(self, url_):
user_agent = self.user_agents_cycle.next()
try:
resp = requests.get(url_, headers={'User-Agent': user_agent})
except requests.RequestException:
print 'sleeping for 100 secs due to a block..'
slp(100)
user_agent = self.user_agents_cycle.next()
resp = requests.get(url_, headers={'User-Agent': user_agent})
if resp.status_code == 200:
data = pq_(resp.text)
data = data('#resume_body').children()
if not data:
user_agent = self.user_agents_cycle.next()
resp = requests.get(url_, headers={'User-Agent': user_agent})
if resp.status_code == 200:
data = pq_(resp.text)
data = data('#resume_body').children()
return data
else:
return []
else:
return data
else:
return []
def save_distincts():
"""
This method parses the unique ids from the given
data directory of ids scraped from Indeed.
"""
t1 = tm()
object_ = {}
data_dir = 'data/'
export_folder = '/Volume/SKILLZEQ/resumes_v1/%s/%s/'
target = 'profile_data/distincts_v2.json'
target_file = open(target, 'wb')
for root, directories, files in os.walk(data_dir):
for filename in files:
file_ = filename.split('.') #--complete filename
file_format = file_[1] #--.json
keyword = file_[0] #--file name
domain = root.split('/')[1] #--parent folder
if file_format == 'json':
filepath = os.path.join(root, filename)
f = open(filepath, 'rb')
for record in f:
try:
record = filter(lambda p: p['type'] == 'resource_id', ast.literal_eval(record))
for i in record:
unique_id = i['data']
if unique_id in object_:
object_[unique_id].append(keyword)
else:
object_[unique_id] = [keyword]
#object_[unique_id] = 1
except:
print filepath
continue
f.close()
target_file.write(json.dumps(object_))
target_file.close()
t2 = tm()
print '%d seconds taken..' % int(t2-t1)
return
def get_distincts():
"""
This method returns the dict of unique ids parsed from the file generated by save_distincts
"""
target = 'profile_data/distincts_v2.json'
f = open(target, 'rb')
for a in f:
data = json.loads(a)
f.close()
print 'data fetched for resume links..'
return data
def scrap_profiles(load_done=False):
done_ = {}
done_target = 'profile_data/done_v1.json'
t1 = tm()
data = get_distincts()
#folder = '/Volumes/SKILLZEQ/%s.json'
folder = '/Users/saif/skillz_eq_samples/%s.json'
for i, key in enumerate(data):
if key not in done_:
try:
obj = indeed_resumes_details(key)
profile = obj.resource_collection()
profile['semantics'] = data[key]
except:
print 'put to sleep for 300 secs due to a block..'
slp(300)
try:
obj = indeed_resumes_details(key)
profile = obj.resource_collection()
profile['semantics'] = data[key]
except:
for k_ in data:
if k_ not in done_:
done_[k_] = 0
df = open(done_target, 'wb')
df.write(json.dumps(done_))
df.close()
print 'script terminated at %d records...data for dones in %s' % (i, done_target)
f = open(folder % key, 'wb')
f.write(json.dumps(profile))
f.close()
done_[key] = 1
if i % 1000 == 0:
t2 = tm()
print '%d records saved in %d seconds..' % (i, int(t2-t1))
if i == 2000:
break
t2 = tm()
print 'success... %d records scraped.. in %d mins..' % (i, int(float(t2-t1)/60))
return
if __name__ == '__main__':
scrap_profiles()
# get_distincts()
# save_distincts()
# get_ids()
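# The intended pipeline, pieced together from the functions above: run
# save_distincts() once to build profile_data/distincts_v2.json from the id
# files under data/, then run the module directly (scrap_profiles()) to fetch
# and store one JSON file per resume id in the `folder` path configured there.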
|
|
import datetime
import glob
import os
import time
from django.conf import settings
from django.template.loader import render_to_string
from ietf.message.models import Message, SendQueue
from ietf.message.utils import send_scheduled_message_from_send_queue
from ietf.doc.models import DocumentAuthor
from ietf.person.models import Person
def announcement_from_form(data, **kwargs):
'''
This function creates a new message record, taking EmailForm.data as input
and keyword arguments used to override some of the message fields.
'''
# possible overrides
by = kwargs.get('by',Person.objects.get(name='(System)'))
from_val = kwargs.get('from_val','ID Tracker <[email protected]>')
content_type = kwargs.get('content_type','')
# from the form
subject = data['subject']
to_val = data['to']
cc_val = data['cc']
body = data['body']
message = Message.objects.create(by=by,
subject=subject,
frm=from_val,
to=to_val,
cc=cc_val,
body=body,
content_type=content_type)
# create SendQueue
send_queue = SendQueue.objects.create(by=by,message=message)
# send the scheduled message immediately rather than waiting for the mail queue
send_scheduled_message_from_send_queue(send_queue)
return message
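# Usage sketch (hypothetical, not from the original code): the form-data keys and
# the from_val override below are illustrative only, and a real call needs a
# configured Django database for Message/SendQueue. The address keeps the file's
# redacted "[email protected]" placeholder.
#
#   msg = announcement_from_form(
#       {'subject': 'Test announcement',
#        'to': '[email protected]',
#        'cc': '',
#        'body': 'Body text'},
#       from_val='IETF Secretariat <[email protected]>')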
def get_authors(draft):
"""
Takes a draft object and returns a list of authors suitable for a tombstone document
"""
authors = []
for a in draft.authors.all():
initial = ''
prefix, first, middle, last, suffix = a.person.name_parts()
if first:
initial = first + '. '
entry = '%s%s <%s>' % (initial,last,a.address)
authors.append(entry)
return authors
def get_abbr_authors(draft):
"""
Takes a draft object and returns a string of the first author followed by "et al"
for use in the New Revision email body.
"""
initial = ''
result = ''
authors = DocumentAuthor.objects.filter(document=draft)
if authors:
prefix, first, middle, last, suffix = authors[0].author.person.name_parts()
if first:
initial = first[0] + '. '
result = '%s%s' % (initial,last)
if len(authors) > 1:
result += ', et al'
return result
def get_last_revision(filename):
"""
This function takes a filename in the same form it appears in the InternetDraft record,
with no revision or extension (i.e. draft-ietf-alto-reqs), and returns a string which is the
revision number of the last active version of the document, i.e. the highest-revision
txt document in the archive directory. If no matching file is found, an exception is raised.
"""
files = glob.glob(os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR,filename) + '-??.txt')
if files:
sorted_files = sorted(files)
return get_revision(sorted_files[-1])
else:
raise Exception('last revision not found in archive')
def get_revision(name):
"""
Takes a draft filename and returns the revision, as a string.
"""
#return name[-6:-4]
base,ext = os.path.splitext(name)
return base[-2:]
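# Example (illustration only; the draft name is hypothetical):
#   get_revision('draft-ietf-alto-reqs-03.txt')  ->  '03'
# os.path.splitext() strips the '.txt', and the last two characters of the
# remaining base name are the two-digit revision.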
def get_revision_emails(draft):
"""
Derived from the ColdFusion legacy app, we accumulate To: emails for a new
revision by adding:
1) the contents of id_internal.state_change_notice_to; this appears to be largely
custom mail lists for the document or group
2) the main AD, via id_internal.job_owner
3) any AD who has marked "discuss" in the ballot associated with this id_internal
4) and now, also, the RFC Editor if the draft is in the RFC Editor Queue
"""
# from legacy
if not draft.get_state('draft-iesg'):
return ''
emails = []
if draft.notify:
emails.append(draft.notify)
if draft.ad:
emails.append(draft.ad.role_email("ad").address)
if draft.active_ballot():
for ad, pos in draft.active_ballot().active_ad_positions().iteritems():
if pos and pos.pos_id == "discuss":
emails.append(ad.role_email("ad").address)
if draft.get_state('draft-iesg').slug == "rfcqueue":
emails.append('[email protected]')
return ', '.join(emails)
def add_email(emails,person):
if person.email() not in emails:
emails[person.email()] = '"%s %s"' % (person.first_name,person.last_name)
def get_fullcc_list(draft):
"""
This function takes a draft object and returns a string of emails to use in the cc field
of a standard notification. Uses an intermediate "emails" dictionary (email address as
key, name as value) to prevent adding duplicate emails to the list.
"""
emails = {}
# get authors
for author in draft.authors.all():
if author.address not in emails:
emails[author.address] = '"%s"' % (author.person.name)
if draft.group.acronym != 'none':
# add chairs
for role in draft.group.role_set.filter(name='chair'):
if role.email.address not in emails:
emails[role.email.address] = '"%s"' % (role.person.name)
# add AD
if draft.group.type.slug == 'wg':
emails['%[email protected]' % draft.group.acronym] = '"%s-ads"' % (draft.group.acronym)
elif draft.group.type.slug == 'rg':
email = draft.group.parent.role_set.filter(name='chair')[0].email
emails[email.address] = '"%s"' % (email.person.name)
# add shepherd
if draft.shepherd:
emails[draft.shepherd.address] = '"%s"' % (draft.shepherd.person.name)
# use sort so we get consistently ordered lists
result_list = []
for key in sorted(emails):
if emails[key]:
result_list.append('%s <%s>' % (emails[key],key))
else:
result_list.append('<%s>' % key)
return ','.join(result_list)
def get_email_initial(draft, type=None, input=None):
"""
Takes a draft object, a string representing the email type
(extend, new, replace, resurrect, revision, update, withdraw) and
a dictionary of the action form input data (for use with replace, update, extend).
Returns a dictionary containing initial field values for an email notification.
The dictionary consists of to, cc, subject, body.
NOTE: for type=new we list all authors in the message body to match the legacy app.
It appears datatracker abbreviates the list with "et al". Datatracker scheduled_announcement
entries have "Action" in the subject whereas this app uses "ACTION".
"""
# assert False, (draft, type, input)
expiration_date = (datetime.date.today() + datetime.timedelta(185)).strftime('%B %d, %Y')
new_revision = str(int(draft.rev)+1).zfill(2)
new_filename = draft.name + '-' + new_revision + '.txt'
curr_filename = draft.name + '-' + draft.rev + '.txt'
data = {}
data['cc'] = get_fullcc_list(draft)
data['to'] = ''
if type == 'extend':
context = {'doc':curr_filename,'expire_date':input['expiration_date']}
data['subject'] = 'Extension of Expiration Date for %s' % (curr_filename)
data['body'] = render_to_string('drafts/message_extend.txt', context)
elif type == 'new':
# if the ID belongs to a group other than "none" add line to message body
if draft.group.type.slug == 'wg':
wg_message = 'This draft is a work item of the %s Working Group of the IETF.' % draft.group.name
else:
wg_message = ''
context = {'wg_message':wg_message,
'draft':draft,
'authors':get_abbr_authors(draft),
'revision_date':draft.latest_event(type='new_revision').time.date(),
'timestamp':time.strftime("%Y-%m-%d%H%M%S", time.localtime())}
data['to'] = '[email protected]'
data['cc'] = draft.group.list_email
data['subject'] = 'I-D ACTION:%s' % (curr_filename)
data['body'] = render_to_string('drafts/message_new.txt', context)
elif type == 'replace':
'''
input['replaced'] is a DocAlias
input['replaced_by'] is a Document
'''
context = {'doc':input['replaced'].name,'replaced_by':input['replaced_by'].name}
data['subject'] = 'Replacement of %s with %s' % (input['replaced'].name,input['replaced_by'].name)
data['body'] = render_to_string('drafts/message_replace.txt', context)
elif type == 'resurrect':
last_revision = get_last_revision(draft.name)
last_filename = draft.name + '-' + last_revision + '.txt'
context = {'doc':last_filename,'expire_date':expiration_date}
data['subject'] = 'Resurrection of %s' % (last_filename)
data['body'] = render_to_string('drafts/message_resurrect.txt', context)
elif type == 'revision':
context = {'rev':new_revision,'doc':new_filename,'doc_base':new_filename[:-4]}
data['to'] = get_revision_emails(draft)
data['cc'] = ''
data['subject'] = 'New Version Notification - %s' % (new_filename)
data['body'] = render_to_string('drafts/message_revision.txt', context)
elif type == 'update':
context = {'doc':input['filename'],'expire_date':expiration_date}
data['subject'] = 'Posting of %s' % (input['filename'])
data['body'] = render_to_string('drafts/message_update.txt', context)
elif type == 'withdraw':
context = {'doc':curr_filename,'by':input['type']}
data['subject'] = 'Withdrawl of %s' % (curr_filename)
data['body'] = render_to_string('drafts/message_withdraw.txt', context)
return data
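# Illustrative sketch (not from the original code) of the dict returned for
# type='revision', assuming a hypothetical draft named 'draft-ietf-example-foo'
# currently at rev '01':
#   {'to': <result of get_revision_emails(draft)>,
#    'cc': '',
#    'subject': 'New Version Notification - draft-ietf-example-foo-02.txt',
#    'body': <rendered drafts/message_revision.txt template>}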
|
|
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import common
class EdifyGenerator(object):
"""Class to generate scripts in the 'edify' recovery script language
used from donut onwards."""
def __init__(self, version, info):
self.script = []
self.mounts = set()
self.version = version
self.info = info
def MakeTemporary(self):
"""Make a temporary script object whose commands can latter be
appended to the parent script with AppendScript(). Used when the
caller wants to generate script commands out-of-order."""
x = EdifyGenerator(self.version, self.info)
x.mounts = self.mounts
return x
@staticmethod
def _WordWrap(cmd, linelen=80):
"""'cmd' should be a function call with null characters after each
parameter (eg, "somefun(foo,\0bar,\0baz)"). This function wraps cmd
to a given line length, replacing nulls with spaces and/or newlines
to format it nicely."""
indent = cmd.index("(")+1
out = []
first = True
x = re.compile("^(.{,%d})\0" % (linelen-indent,))
while True:
if not first:
out.append(" " * indent)
first = False
m = x.search(cmd)
if not m:
parts = cmd.split("\0", 1)
out.append(parts[0]+"\n")
if len(parts) == 1:
break
else:
cmd = parts[1]
continue
out.append(m.group(1)+"\n")
cmd = cmd[m.end():]
return "".join(out).replace("\0", " ").rstrip("\n")
def AppendScript(self, other):
"""Append the contents of another script (which should be created
with MakeTemporary()) to this one."""
self.script.extend(other.script)
def AssertSomeFingerprint(self, *fp):
"""Assert that the current system build fingerprint is one of *fp."""
if not fp:
raise ValueError("must specify some fingerprints")
cmd = (
' ||\n '.join([('file_getprop("/system/build.prop", '
'"ro.build.fingerprint") == "%s"')
% i for i in fp]) +
' ||\n abort("Package expects build fingerprint of %s; this '
'device has " + getprop("ro.build.fingerprint") + ".");'
) % (" or ".join(fp),)
self.script.append(cmd)
def AssertOlderBuild(self, timestamp, timestamp_text):
"""Assert that the build on the device is older (or the same as)
the given timestamp."""
self.script.append(
('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
'abort("Can\'t install this package (%s) over newer '
'build (" + getprop("ro.build.date") + ").");'
) % (timestamp, timestamp_text))
def AssertDevice(self, device):
"""Assert that the device identifier is the given string."""
cmd = ('assert(' +
' || \0'.join(['getprop("ro.product.device") == "%s" || getprop("ro.build.product") == "%s"'
% (i, i) for i in device.split(",")]) +
' || abort("This package is for \\"%s\\" devices; '
'this is a \\"" + getprop("ro.product.device") + "\\".");'
');') % device
self.script.append(self._WordWrap(cmd))
def AssertSomeBootloader(self, *bootloaders):
"""Asert that the bootloader version is one of *bootloaders."""
cmd = ("assert(" +
" ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,)
for b in bootloaders]) +
");")
self.script.append(self._WordWrap(cmd))
def RunBackup(self, command):
self.script.append('package_extract_file("system/bin/backuptool.sh", "/tmp/backuptool.sh");')
self.script.append('package_extract_file("system/bin/backuptool.functions", "/tmp/backuptool.functions");')
if not self.info.get("use_set_metadata", False):
self.script.append('set_perm(0, 0, 0755, "/tmp/backuptool.sh");')
self.script.append('set_perm(0, 0, 0644, "/tmp/backuptool.functions");')
else:
self.script.append('set_metadata("/tmp/backuptool.sh", "uid", 0, "gid", 0, "mode", 0755);')
self.script.append('set_metadata("/tmp/backuptool.functions", "uid", 0, "gid", 0, "mode", 0644);')
self.script.append(('run_program("/tmp/backuptool.sh", "%s");' % command))
if command == "restore":
self.script.append('delete("/system/bin/backuptool.sh");')
self.script.append('delete("/system/bin/backuptool.functions");')
def ValidateSignatures(self, command):
if command == "cleanup":
self.script.append('delete("/system/bin/otasigcheck.sh");')
else:
self.script.append('package_extract_file("system/bin/otasigcheck.sh", "/tmp/otasigcheck.sh");')
self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");')
self.script.append('set_metadata("/tmp/otasigcheck.sh", "uid", 0, "gid", 0, "mode", 0755);')
self.script.append('run_program("/tmp/otasigcheck.sh");')
## Hax: a failure from run_program doesn't trigger an abort, so have it change the key value and check for "INVALID"
self.script.append('sha1_check(read_file("/tmp/releasekey"),"7241e92725436afc79389d4fc2333a2aa8c20230") && abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");')
def ShowProgress(self, frac, dur):
"""Update the progress bar, advancing it over 'frac' over the next
'dur' seconds. 'dur' may be zero to advance it via SetProgress
commands instead of by time."""
self.script.append("show_progress(%f, %d);" % (frac, int(dur)))
def SetProgress(self, frac):
"""Set the position of the progress bar within the chunk defined
by the most recent ShowProgress call. 'frac' should be in
[0,1]."""
self.script.append("set_progress(%f);" % (frac,))
def PatchCheck(self, filename, *sha1):
"""Check that the given file (or MTD reference) has one of the
given *sha1 hashes, checking the version saved in cache if the
file does not match."""
self.script.append(
'apply_patch_check("%s"' % (filename,) +
"".join([', "%s"' % (i,) for i in sha1]) +
') || abort("\\"%s\\" has unexpected contents.");' % (filename,))
def FileCheck(self, filename, *sha1):
"""Check that the given file (or MTD reference) has one of the
given *sha1 hashes."""
self.script.append('assert(sha1_check(read_file("%s")' % (filename,) +
"".join([', "%s"' % (i,) for i in sha1]) +
'));')
def CacheFreeSpaceCheck(self, amount):
"""Check that there's at least 'amount' space that can be made
available on /cache."""
self.script.append(('apply_patch_space(%d) || abort("Not enough free space '
'on /system to apply patches.");') % (amount,))
def Mount(self, mount_point):
"""Mount the partition with the given mount_point."""
fstab = self.info.get("fstab", None)
if fstab:
p = fstab[mount_point]
self.script.append('mount("%s", "%s", "%s", "%s");' %
(p.fs_type, common.PARTITION_TYPES[p.fs_type],
p.device, p.mount_point))
self.mounts.add(p.mount_point)
def Unmount(self, mount_point):
"""Unmount the partiiton with the given mount_point."""
if mount_point in self.mounts:
self.mounts.remove(mount_point)
self.script.append('unmount("%s");' % (mount_point,))
def UnpackPackageDir(self, src, dst):
"""Unpack a given directory from the OTA package into the given
destination directory."""
self.script.append('package_extract_dir("%s", "%s");' % (src, dst))
def Comment(self, comment):
"""Write a comment into the update script."""
self.script.append("")
for i in comment.split("\n"):
self.script.append("# " + i)
self.script.append("")
def Print(self, message):
"""Log a message to the screen (if the logs are visible)."""
self.script.append('ui_print("%s");' % (message,))
def FormatPartition(self, partition):
"""Format the given partition, specified by its mount point (eg,
"/system")."""
reserve_size = 0
fstab = self.info.get("fstab", None)
if fstab:
p = fstab[partition]
self.script.append('format("%s", "%s", "%s", "%s", "%s");' %
(p.fs_type, common.PARTITION_TYPES[p.fs_type],
p.device, p.length, p.mount_point))
def DeleteFiles(self, file_list):
"""Delete all files in file_list."""
if not file_list: return
cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
self.script.append(self._WordWrap(cmd))
def RenameFile(self, srcfile, tgtfile):
"""Moves a file from one location to another."""
if self.info.get("update_rename_support", False):
self.script.append('rename("%s", "%s");' % (srcfile, tgtfile))
else:
raise ValueError("Rename not supported by update binary")
def SkipNextActionIfTargetExists(self, tgtfile, tgtsha1):
"""Prepend an action with an apply_patch_check in order to
skip the action if the file exists. Used when a patch
is later renamed."""
cmd = ('sha1_check(read_file("%s"), %s) || ' % (tgtfile, tgtsha1))
self.script.append(self._WordWrap(cmd))
def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
"""Apply binary patches (in *patchpairs) to the given srcfile to
produce tgtfile (which may be "-" to indicate overwriting the
source file."""
if len(patchpairs) % 2 != 0 or len(patchpairs) == 0:
raise ValueError("bad patches given to ApplyPatch")
cmd = ['apply_patch("%s",\0"%s",\0%s,\0%d'
% (srcfile, tgtfile, tgtsha1, tgtsize)]
for i in range(0, len(patchpairs), 2):
cmd.append(',\0%s, package_extract_file("%s")' % patchpairs[i:i+2])
cmd.append(');')
cmd = "".join(cmd)
self.script.append(self._WordWrap(cmd))
def WriteRawImage(self, mount_point, fn):
"""Write the given package file into the partition for the given
mount point."""
fstab = self.info["fstab"]
if fstab:
p = fstab[mount_point]
partition_type = common.PARTITION_TYPES[p.fs_type]
args = {'device': p.device, 'fn': fn}
if partition_type == "MTD":
self.script.append(
('package_extract_file("%(fn)s", "/tmp/boot.img");'
'write_raw_image("/tmp/boot.img", "%(device)s");') % args)
elif partition_type == "EMMC":
self.script.append(
'package_extract_file("%(fn)s", "%(device)s");' % args)
elif partition_type == "BML":
self.script.append(
('assert(package_extract_file("%(fn)s", "/tmp/%(device)s.img"),\n'
' write_raw_image("/tmp/%(device)s.img", "%(device)s"),\n'
' delete("/tmp/%(device)s.img"));') % args)
else:
raise ValueError("don't know how to write \"%s\" partitions" % (p.fs_type,))
def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities):
"""Set file ownership and permissions."""
if not self.info.get("use_set_metadata", False):
self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
else:
if capabilities is None: capabilities = "0x0"
cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \
'"capabilities", %s' % (fn, uid, gid, mode, capabilities)
if selabel is not None:
cmd += ', "selabel", "%s"' % ( selabel )
cmd += ');'
self.script.append(cmd)
def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel, capabilities):
"""Recursively set path ownership and permissions."""
if not self.info.get("use_set_metadata", False):
self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
% (uid, gid, dmode, fmode, fn))
else:
if capabilities is None: capabilities = "0x0"
cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
'"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \
% (fn, uid, gid, dmode, fmode, capabilities)
if selabel is not None:
cmd += ', "selabel", "%s"' % ( selabel )
cmd += ');'
self.script.append(cmd)
def MakeSymlinks(self, symlink_list):
"""Create symlinks, given a list of (dest, link) pairs."""
by_dest = {}
for d, l in symlink_list:
by_dest.setdefault(d, []).append(l)
for dest, links in sorted(by_dest.iteritems()):
cmd = ('symlink("%s", ' % (dest,) +
",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
self.script.append(self._WordWrap(cmd))
def AppendExtra(self, extra):
"""Append text verbatim to the output script."""
self.script.append(extra)
def UnmountAll(self):
for p in sorted(self.mounts):
self.script.append('unmount("%s");' % (p,))
self.mounts = set()
def AddToZip(self, input_zip, output_zip, input_path=None):
"""Write the accumulated script to the output_zip file. input_zip
is used as the source for the 'updater' binary needed to run
script. If input_path is not None, it will be used as a local
path for the binary instead of input_zip."""
self.UnmountAll()
common.ZipWriteStr(output_zip, "META-INF/com/google/android/updater-script",
"\n".join(self.script) + "\n")
if input_path is None:
data = input_zip.read("OTA/bin/updater")
else:
data = open(os.path.join(input_path, "updater")).read()
common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
data, perms=0755)
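def _example_script_fragment():
    # Illustrative sketch only (not part of the original class): shows how
    # commands accumulate in EdifyGenerator.script. The version number and
    # the empty info dict are assumptions made for the example.
    script = EdifyGenerator(3, {})
    script.Comment("Example OTA fragment")
    script.ShowProgress(0.5, 10)
    script.Print("Patching system files...")
    return "\n".join(script.script)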
|
|
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
and support conversion to/from XML. These classes are solely concerned
with providing direct Object <-> XML document conversions. No policy or
operational decisions should be made by code in these classes. Such
policy belongs in the 'designer.py' module which provides simplified
helpers for populating config object instances.
"""
import time
from lxml import etree
from oslo.utils import units
import six
from nova import exception
from nova.openstack.common import log as logging
from nova.pci import utils as pci_utils
from nova.virt import hardware
LOG = logging.getLogger(__name__)
# Namespace to use for Nova specific metadata items in XML
NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0"
class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
self.root_name = kwargs.get("root_name")
self.ns_prefix = kwargs.get('ns_prefix')
self.ns_uri = kwargs.get('ns_uri')
def _new_node(self, name, **kwargs):
if self.ns_uri is None:
return etree.Element(name, **kwargs)
else:
return etree.Element("{" + self.ns_uri + "}" + name,
nsmap={self.ns_prefix: self.ns_uri},
**kwargs)
def _text_node(self, name, value, **kwargs):
child = self._new_node(name, **kwargs)
child.text = six.text_type(value)
return child
def format_dom(self):
return self._new_node(self.root_name)
def parse_str(self, xmlstr):
self.parse_dom(etree.fromstring(xmlstr))
def parse_dom(self, xmldoc):
if self.root_name != xmldoc.tag:
raise exception.InvalidInput(
"Root element name should be '%s' not '%s'"
% (self.root_name, xmldoc.tag))
def to_xml(self, pretty_print=True):
root = self.format_dom()
xml_str = etree.tostring(root, pretty_print=pretty_print)
LOG.debug("Generated XML %s ", (xml_str,))
return xml_str
class LibvirtConfigCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCaps, self).__init__(root_name="capabilities",
**kwargs)
self.host = None
self.guests = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCaps, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "host":
host = LibvirtConfigCapsHost()
host.parse_dom(c)
self.host = host
elif c.tag == "guest":
guest = LibvirtConfigCapsGuest()
guest.parse_dom(c)
self.guests.append(guest)
def format_dom(self):
caps = super(LibvirtConfigCaps, self).format_dom()
if self.host:
caps.append(self.host.format_dom())
for g in self.guests:
caps.append(g.format_dom())
return caps
class LibvirtConfigCapsNUMATopology(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMATopology, self).__init__(
root_name="topology",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc)
xmlcells = xmldoc.getchildren()[0]
for xmlcell in xmlcells.getchildren():
cell = LibvirtConfigCapsNUMACell()
cell.parse_dom(xmlcell)
self.cells.append(cell)
def format_dom(self):
topo = super(LibvirtConfigCapsNUMATopology, self).format_dom()
cells = etree.Element("cells")
cells.set("num", str(len(self.cells)))
topo.append(cells)
for cell in self.cells:
cells.append(cell.format_dom())
return topo
class LibvirtConfigCapsNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.memory = None
self.mempages = []
self.cpus = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
for c in xmldoc.getchildren():
if c.tag == "memory":
self.memory = int(c.text)
elif c.tag == "pages":
pages = LibvirtConfigCapsNUMAPages()
pages.parse_dom(c)
self.mempages.append(pages)
elif c.tag == "cpus":
for c2 in c.getchildren():
cpu = LibvirtConfigCapsNUMACPU()
cpu.parse_dom(c2)
self.cpus.append(cpu)
def format_dom(self):
cell = super(LibvirtConfigCapsNUMACell, self).format_dom()
cell.set("id", str(self.id))
mem = etree.Element("memory")
mem.set("unit", "KiB")
mem.text = str(self.memory)
cell.append(mem)
for pages in self.mempages:
cell.append(pages.format_dom())
cpus = etree.Element("cpus")
cpus.set("num", str(len(self.cpus)))
for cpu in self.cpus:
cpus.append(cpu.format_dom())
cell.append(cpus)
return cell
class LibvirtConfigCapsNUMACPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu",
**kwargs)
self.id = None
self.socket_id = None
self.core_id = None
self.siblings = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
if xmldoc.get("socket_id") is not None:
self.socket_id = int(xmldoc.get("socket_id"))
if xmldoc.get("core_id") is not None:
self.core_id = int(xmldoc.get("core_id"))
if xmldoc.get("siblings") is not None:
self.siblings = hardware.parse_cpu_spec(
xmldoc.get("siblings"))
def format_dom(self):
cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom()
cpu.set("id", str(self.id))
if self.socket_id is not None:
cpu.set("socket_id", str(self.socket_id))
if self.core_id is not None:
cpu.set("core_id", str(self.core_id))
if self.siblings is not None:
cpu.set("siblings",
hardware.format_cpu_spec(self.siblings))
return cpu
class LibvirtConfigCapsNUMAPages(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMAPages, self).__init__(
root_name="pages", **kwargs)
self.size = None
self.total = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMAPages, self).parse_dom(xmldoc)
self.size = int(xmldoc.get("size"))
self.total = int(xmldoc.text)
def format_dom(self):
pages = super(LibvirtConfigCapsNUMAPages, self).format_dom()
pages.text = str(self.total)
pages.set("size", str(self.size))
pages.set("unit", "KiB")
return pages
class LibvirtConfigCapsHost(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsHost, self).__init__(root_name="host",
**kwargs)
self.cpu = None
self.uuid = None
self.topology = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "cpu":
cpu = LibvirtConfigCPU()
cpu.parse_dom(c)
self.cpu = cpu
elif c.tag == "uuid":
self.uuid = c.text
elif c.tag == "topology":
self.topology = LibvirtConfigCapsNUMATopology()
self.topology.parse_dom(c)
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
if self.uuid:
caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
if self.topology:
caps.append(self.topology.format_dom())
return caps
class LibvirtConfigCapsGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuest, self).__init__(root_name="guest",
**kwargs)
self.arch = None
self.ostype = None
self.domtype = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "os_type":
self.ostype = c.text
elif c.tag == "arch":
self.arch = c.get("name")
for sc in c.getchildren():
if sc.tag == "domain":
self.domtype.append(sc.get("type"))
def format_dom(self):
caps = super(LibvirtConfigCapsGuest, self).format_dom()
if self.ostype is not None:
caps.append(self._text_node("os_type", self.ostype))
if self.arch:
arch = etree.Element("arch", name=self.arch)
for dt in self.domtype:
dte = etree.Element("domain")
dte.set("type", dt)
arch.append(dte)
caps.append(arch)
return caps
class LibvirtConfigGuestTimer(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestTimer, self).__init__(root_name="timer",
**kwargs)
self.name = "platform"
self.track = None
self.tickpolicy = None
self.present = None
def format_dom(self):
tm = super(LibvirtConfigGuestTimer, self).format_dom()
tm.set("name", self.name)
if self.track is not None:
tm.set("track", self.track)
if self.tickpolicy is not None:
tm.set("tickpolicy", self.tickpolicy)
if self.present is not None:
if self.present:
tm.set("present", "yes")
else:
tm.set("present", "no")
return tm
class LibvirtConfigGuestClock(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestClock, self).__init__(root_name="clock",
**kwargs)
self.offset = "utc"
self.adjustment = None
self.timezone = None
self.timers = []
def format_dom(self):
clk = super(LibvirtConfigGuestClock, self).format_dom()
clk.set("offset", self.offset)
if self.adjustment:
clk.set("adjustment", self.adjustment)
elif self.timezone:
clk.set("timezone", self.timezone)
for tm in self.timers:
clk.append(tm.format_dom())
return clk
def add_timer(self, tm):
self.timers.append(tm)
class LibvirtConfigCPUFeature(LibvirtConfigObject):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigCPUFeature, self).__init__(root_name='feature',
**kwargs)
self.name = name
def parse_dom(self, xmldoc):
super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc)
self.name = xmldoc.get("name")
def format_dom(self):
ft = super(LibvirtConfigCPUFeature, self).format_dom()
ft.set("name", self.name)
return ft
def __eq__(self, obj):
return obj.name == self.name
def __ne__(self, obj):
return obj.name != self.name
def __hash__(self):
return hash(self.name)
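def _example_cpu_feature_roundtrip():
    # Minimal sketch (not part of the original module): demonstrates the
    # Object <-> XML conversion pattern described in the module docstring,
    # using a hypothetical "vmx" feature flag.
    feat = LibvirtConfigCPUFeature(name="vmx")
    xml = feat.to_xml()          # e.g. '<feature name="vmx"/>\n'
    parsed = LibvirtConfigCPUFeature()
    parsed.parse_str(xml)
    return parsed.name           # "vmx"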
class LibvirtConfigCPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCPU, self).__init__(root_name='cpu',
**kwargs)
self.arch = None
self.vendor = None
self.model = None
self.sockets = None
self.cores = None
self.threads = None
self.features = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigCPU, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "arch":
self.arch = c.text
elif c.tag == "model":
self.model = c.text
elif c.tag == "vendor":
self.vendor = c.text
elif c.tag == "topology":
self.sockets = int(c.get("sockets"))
self.cores = int(c.get("cores"))
self.threads = int(c.get("threads"))
elif c.tag == "feature":
f = LibvirtConfigCPUFeature()
f.parse_dom(c)
self.add_feature(f)
def format_dom(self):
cpu = super(LibvirtConfigCPU, self).format_dom()
if self.arch is not None:
cpu.append(self._text_node("arch", self.arch))
if self.model is not None:
cpu.append(self._text_node("model", self.model))
if self.vendor is not None:
cpu.append(self._text_node("vendor", self.vendor))
if (self.sockets is not None and
self.cores is not None and
self.threads is not None):
top = etree.Element("topology")
top.set("sockets", str(self.sockets))
top.set("cores", str(self.cores))
top.set("threads", str(self.threads))
cpu.append(top)
# sorting the features to allow more predictable tests
for f in sorted(self.features, key=lambda x: x.name):
cpu.append(f.format_dom())
return cpu
def add_feature(self, feat):
self.features.add(feat)
class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs)
self.policy = "require"
def format_dom(self):
ft = super(LibvirtConfigGuestCPUFeature, self).format_dom()
ft.set("policy", self.policy)
return ft
class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.cpus = None
self.memory = None
def parse_dom(self, xmldoc):
if xmldoc.get("id") is not None:
self.id = int(xmldoc.get("id"))
if xmldoc.get("memory") is not None:
self.memory = int(xmldoc.get("memory"))
if xmldoc.get("cpus") is not None:
self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus"))
def format_dom(self):
cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom()
if self.id is not None:
cell.set("id", str(self.id))
if self.cpus is not None:
cell.set("cpus",
hardware.format_cpu_spec(self.cpus))
if self.memory is not None:
cell.set("memory", str(self.memory))
return cell
class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc)
for child in xmldoc.getchildren():
if child.tag == "cell":
cell = LibvirtConfigGuestCPUNUMACell()
cell.parse_dom(child)
self.cells.append(cell)
def format_dom(self):
numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom()
for cell in self.cells:
numa.append(cell.format_dom())
return numa
class LibvirtConfigGuestCPU(LibvirtConfigCPU):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPU, self).__init__(**kwargs)
self.mode = None
self.match = "exact"
self.numa = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.match = xmldoc.get('match')
for child in xmldoc.getchildren():
if child.tag == "numa":
numa = LibvirtConfigGuestCPUNUMA()
numa.parse_dom(child)
self.numa = numa
def format_dom(self):
cpu = super(LibvirtConfigGuestCPU, self).format_dom()
if self.mode:
cpu.set("mode", self.mode)
cpu.set("match", self.match)
if self.numa is not None:
cpu.append(self.numa.format_dom())
return cpu
class LibvirtConfigGuestSMBIOS(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios",
**kwargs)
self.mode = "sysinfo"
def format_dom(self):
smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom()
smbios.set("mode", self.mode)
return smbios
class LibvirtConfigGuestSysinfo(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo",
**kwargs)
self.type = "smbios"
self.bios_vendor = None
self.bios_version = None
self.system_manufacturer = None
self.system_product = None
self.system_version = None
self.system_serial = None
self.system_uuid = None
def format_dom(self):
sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom()
sysinfo.set("type", self.type)
bios = None
system = None
if self.bios_vendor is not None:
if bios is None:
bios = etree.Element("bios")
info = etree.Element("entry", name="vendor")
info.text = self.bios_vendor
bios.append(info)
if self.bios_version is not None:
if bios is None:
bios = etree.Element("bios")
info = etree.Element("entry", name="version")
info.text = self.bios_version
bios.append(info)
if self.system_manufacturer is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="manufacturer")
info.text = self.system_manufacturer
system.append(info)
if self.system_product is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="product")
info.text = self.system_product
system.append(info)
if self.system_version is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="version")
info.text = self.system_version
system.append(info)
if self.system_serial is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="serial")
info.text = self.system_serial
system.append(info)
if self.system_uuid is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="uuid")
info.text = self.system_uuid
system.append(info)
if bios is not None:
sysinfo.append(bios)
if system is not None:
sysinfo.append(system)
return sysinfo
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDevice, self).__init__(**kwargs)
class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = "file"
self.source_device = "disk"
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.driver_discard = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
self.disk_read_bytes_sec = None
self.disk_read_iops_sec = None
self.disk_write_bytes_sec = None
self.disk_write_iops_sec = None
self.disk_total_bytes_sec = None
self.disk_total_iops_sec = None
self.logical_block_size = None
self.physical_block_size = None
self.readonly = False
self.snapshot = None
self.backing_store = None
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
dev.set("type", self.source_type)
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None or
self.driver_discard is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
if self.driver_discard is not None:
drv.set("discard", self.driver_discard)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
if self.serial is not None:
dev.append(self._text_node("serial", self.serial))
iotune = etree.Element("iotune")
if self.disk_read_bytes_sec is not None:
iotune.append(self._text_node("read_bytes_sec",
self.disk_read_bytes_sec))
if self.disk_read_iops_sec is not None:
iotune.append(self._text_node("read_iops_sec",
self.disk_read_iops_sec))
if self.disk_write_bytes_sec is not None:
iotune.append(self._text_node("write_bytes_sec",
self.disk_write_bytes_sec))
if self.disk_write_iops_sec is not None:
iotune.append(self._text_node("write_iops_sec",
self.disk_write_iops_sec))
if self.disk_total_bytes_sec is not None:
iotune.append(self._text_node("total_bytes_sec",
self.disk_total_bytes_sec))
if self.disk_total_iops_sec is not None:
iotune.append(self._text_node("total_iops_sec",
self.disk_total_iops_sec))
if len(iotune) > 0:
dev.append(iotune)
# Block size tuning
if (self.logical_block_size is not None or
self.physical_block_size is not None):
blockio = etree.Element("blockio")
if self.logical_block_size is not None:
blockio.set('logical_block_size', self.logical_block_size)
if self.physical_block_size is not None:
blockio.set('physical_block_size', self.physical_block_size)
dev.append(blockio)
if self.readonly:
dev.append(etree.Element("readonly"))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
self.driver_discard = c.get('discard')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c.getchildren():
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
elif c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
elif c.tag == 'backingStore':
b = LibvirtConfigGuestDiskBackingStore()
b.parse_dom(c)
self.backing_store = b
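def _example_file_backed_disk():
    # Illustrative sketch only (not part of the original module): builds the
    # XML for a qcow2 file-backed virtio disk. The path and device names are
    # hypothetical values chosen for the example.
    disk = LibvirtConfigGuestDisk()
    disk.source_type = "file"
    disk.source_path = "/var/lib/nova/instances/example/disk"
    disk.driver_name = "qemu"
    disk.driver_format = "qcow2"
    disk.target_dev = "vda"
    disk.target_bus = "virtio"
    return disk.to_xml()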
class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskBackingStore, self).__init__(
root_name="backingStore", **kwargs)
self.index = None
self.source_type = None
self.source_file = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.driver_name = None
self.driver_format = None
self.backing_store = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.index = xmldoc.get('index')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
elif c.tag == 'source':
self.source_file = c.get('file')
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for d in c.getchildren():
if d.tag == 'host':
self.source_hosts.append(d.get('name'))
self.source_ports.append(d.get('port'))
elif c.tag == 'backingStore':
if c.getchildren():
self.backing_store = LibvirtConfigGuestDiskBackingStore()
self.backing_store.parse_dom(c)
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
"""Disk class for handling disk information in snapshots.
Similar to LibvirtConfigGuestDisk, but used to represent
disk entities in <domainsnapshot> structures rather than
real devices. These typically have fewer members, and
different expectations for which fields are required.
"""
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = None
self.source_device = None
self.name = None
self.snapshot = None
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
def format_dom(self):
dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom()
if self.name:
dev.attrib['name'] = self.name
if self.snapshot:
dev.attrib['snapshot'] = self.snapshot
if self.source_type:
dev.set("type", self.source_type)
if self.source_device:
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
if self.target_bus and self.target_dev:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c.getchildren():
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
for c in xmldoc.getchildren():
if c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem",
**kwargs)
self.source_type = "mount"
self.source_dir = None
self.target_dir = "/"
def format_dom(self):
dev = super(LibvirtConfigGuestFilesys, self).format_dom()
dev.set("type", self.source_type)
dev.append(etree.Element("source", dir=self.source_dir))
dev.append(etree.Element("target", dir=self.target_dir))
return dev
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
self.count = 10000
def parse_dom(self, xmldoc):
self.start = int(xmldoc.get('start'))
self.target = int(xmldoc.get('target'))
self.count = int(xmldoc.get('count'))
def format_dom(self):
obj = super(LibvirtConfigGuestIDMap, self).format_dom()
obj.set("start", str(self.start))
obj.set("target", str(self.target))
obj.set("count", str(self.count))
return obj
class LibvirtConfigGuestUIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUIDMap, self).__init__(root_name="uid",
**kwargs)
class LibvirtConfigGuestGIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGIDMap, self).__init__(root_name="gid",
**kwargs)
class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInterface, self).__init__(
root_name="interface",
**kwargs)
self.net_type = None
self.target_dev = None
self.model = None
self.mac_addr = None
self.script = None
self.source_dev = None
self.source_mode = "private"
self.vporttype = None
self.vportparams = []
self.filtername = None
self.filterparams = []
self.driver_name = None
self.vif_inbound_peak = None
self.vif_inbound_burst = None
self.vif_inbound_average = None
self.vif_outbound_peak = None
self.vif_outbound_burst = None
self.vif_outbound_average = None
self.vlan = None
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
dev.set("type", self.net_type)
if self.net_type == "hostdev":
dev.set("managed", "yes")
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
if self.driver_name:
dev.append(etree.Element("driver", name=self.driver_name))
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
elif self.net_type == "hostdev":
source_elem = etree.Element("source")
domain, bus, slot, func = \
pci_utils.get_pci_address_fields(self.source_dev)
addr_elem = etree.Element("address", type='pci')
addr_elem.set("domain", "0x%s" % (domain))
addr_elem.set("bus", "0x%s" % (bus))
addr_elem.set("slot", "0x%s" % (slot))
addr_elem.set("function", "0x%s" % (func))
source_elem.append(addr_elem)
dev.append(source_elem)
else:
dev.append(etree.Element("source", bridge=self.source_dev))
if self.vlan and self.net_type in ("direct", "hostdev"):
vlan_elem = etree.Element("vlan")
tag_elem = etree.Element("tag", id=self.vlan)
vlan_elem.append(tag_elem)
dev.append(vlan_elem)
if self.target_dev is not None:
dev.append(etree.Element("target", dev=self.target_dev))
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
param = etree.Element("parameters")
param.set(p['key'], p['value'])
vport.append(param)
dev.append(vport)
if self.filtername is not None:
filter = etree.Element("filterref", filter=self.filtername)
for p in self.filterparams:
filter.append(etree.Element("parameter",
name=p['key'],
value=p['value']))
dev.append(filter)
if self.vif_inbound_average or self.vif_outbound_average:
bandwidth = etree.Element("bandwidth")
if self.vif_inbound_average is not None:
vif_inbound = etree.Element("inbound",
average=str(self.vif_inbound_average))
if self.vif_inbound_peak is not None:
vif_inbound.set("peak", str(self.vif_inbound_peak))
if self.vif_inbound_burst is not None:
vif_inbound.set("burst", str(self.vif_inbound_burst))
bandwidth.append(vif_inbound)
if self.vif_outbound_average is not None:
vif_outbound = etree.Element("outbound",
average=str(self.vif_outbound_average))
if self.vif_outbound_peak is not None:
vif_outbound.set("peak", str(self.vif_outbound_peak))
if self.vif_outbound_burst is not None:
vif_outbound.set("burst", str(self.vif_outbound_burst))
bandwidth.append(vif_outbound)
dev.append(bandwidth)
return dev
def add_filter_param(self, key, value):
self.filterparams.append({'key': key, 'value': value})
def add_vport_param(self, key, value):
self.vportparams.append({'key': key, 'value': value})
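def _example_bridge_interface():
    # Illustrative sketch only (not part of the original module): a bridged
    # virtio interface; the MAC address and bridge name are hypothetical.
    iface = LibvirtConfigGuestInterface()
    iface.net_type = "bridge"
    iface.mac_addr = "52:54:00:12:34:56"
    iface.source_dev = "br0"
    iface.model = "virtio"
    return iface.to_xml()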
class LibvirtConfigGuestInput(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInput, self).__init__(root_name="input",
**kwargs)
self.type = "tablet"
self.bus = "usb"
def format_dom(self):
dev = super(LibvirtConfigGuestInput, self).format_dom()
dev.set("type", self.type)
dev.set("bus", self.bus)
return dev
class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics",
**kwargs)
self.type = "vnc"
self.autoport = True
self.keymap = None
self.listen = None
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
dev.set("type", self.type)
if self.autoport:
dev.set("autoport", "yes")
else:
dev.set("autoport", "no")
if self.keymap:
dev.set("keymap", self.keymap)
if self.listen:
dev.set("listen", self.listen)
return dev
class LibvirtConfigSeclabel(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel",
**kwargs)
self.type = 'dynamic'
self.baselabel = None
def format_dom(self):
seclabel = super(LibvirtConfigSeclabel, self).format_dom()
seclabel.set('type', self.type)
if self.baselabel:
seclabel.append(self._text_node("baselabel", self.baselabel))
return seclabel
class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVideo, self).__init__(root_name="video",
**kwargs)
self.type = 'cirrus'
self.vram = None
self.heads = None
def format_dom(self):
dev = super(LibvirtConfigGuestVideo, self).format_dom()
model = etree.Element("model")
model.set("type", self.type)
if self.vram:
model.set("vram", str(self.vram))
if self.heads:
model.set("heads", str(self.heads))
dev.append(model)
return dev
class LibvirtConfigMemoryBalloon(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigMemoryBalloon, self).__init__(
root_name='memballoon',
**kwargs)
self.model = None
self.period = None
def format_dom(self):
dev = super(LibvirtConfigMemoryBalloon, self).format_dom()
dev.set('model', str(self.model))
if self.period is not None:
dev.append(etree.Element('stats', period=str(self.period)))
return dev
class LibvirtConfigGuestController(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestController,
self).__init__(root_name="controller", **kwargs)
self.type = None
self.index = None
self.model = None
def format_dom(self):
controller = super(LibvirtConfigGuestController, self).format_dom()
controller.set("type", self.type)
if self.index is not None:
controller.set("index", str(self.index))
if self.model:
controller.set("model", str(self.model))
return controller
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdev, self).\
__init__(root_name="hostdev", **kwargs)
self.mode = kwargs.get('mode')
self.type = kwargs.get('type')
self.managed = 'yes'
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
dev.set("mode", self.mode)
dev.set("type", self.type)
dev.set("managed", self.managed)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.type = xmldoc.get('type')
self.managed = xmldoc.get('managed')
return xmldoc.getchildren()
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
__init__(mode='subsystem', type='pci',
**kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom()
address = etree.Element("address",
domain='0x' + self.domain,
bus='0x' + self.bus,
slot='0x' + self.slot,
function='0x' + self.function)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc)
for c in childs:
if c.tag == "source":
for sub in c.getchildren():
if sub.tag == 'address':
self.domain = sub.get('domain')
self.bus = sub.get('bus')
self.slot = sub.get('slot')
self.function = sub.get('function')
class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharBase, self).__init__(**kwargs)
self.type = "pty"
self.source_path = None
self.listen_port = None
self.listen_host = None
def format_dom(self):
dev = super(LibvirtConfigGuestCharBase, self).format_dom()
dev.set("type", self.type)
if self.type == "file":
dev.append(etree.Element("source", path=self.source_path))
elif self.type == "unix":
dev.append(etree.Element("source", mode="bind",
path=self.source_path))
elif self.type == "tcp":
dev.append(etree.Element("source", mode="bind",
host=self.listen_host,
service=str(self.listen_port)))
return dev
class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChar, self).__init__(**kwargs)
self.target_port = None
self.target_type = None
def format_dom(self):
dev = super(LibvirtConfigGuestChar, self).format_dom()
if self.target_port is not None or self.target_type is not None:
target = etree.Element("target")
if self.target_port is not None:
target.set("port", str(self.target_port))
if self.target_type is not None:
target.set("type", self.target_type)
dev.append(target)
return dev
class LibvirtConfigGuestSerial(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSerial, self).__init__(root_name="serial",
**kwargs)
class LibvirtConfigGuestConsole(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestConsole, self).__init__(root_name="console",
**kwargs)
class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChannel, self).__init__(root_name="channel",
**kwargs)
self.target_type = "virtio"
self.target_name = None
def format_dom(self):
dev = super(LibvirtConfigGuestChannel, self).format_dom()
target = etree.Element("target", type=self.target_type)
if self.target_name is not None:
target.set("name", self.target_name)
dev.append(target)
return dev
class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog",
**kwargs)
self.model = 'i6300esb'
self.action = 'reset'
def format_dom(self):
dev = super(LibvirtConfigGuestWatchdog, self).format_dom()
dev.set('model', self.model)
dev.set('action', self.action)
return dev
class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__(
root_name="vcpupin",
**kwargs)
self.id = None
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom()
root.set("vcpu", str(self.id))
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneEmulatorPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneEmulatorPin, self).__init__(
root_name="emulatorpin",
**kwargs)
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneEmulatorPin, self).format_dom()
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTune, self).__init__(root_name="cputune",
**kwargs)
self.shares = None
self.quota = None
self.period = None
self.vcpupin = []
self.emulatorpin = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTune, self).format_dom()
if self.shares is not None:
root.append(self._text_node("shares", str(self.shares)))
if self.quota is not None:
root.append(self._text_node("quota", str(self.quota)))
if self.period is not None:
root.append(self._text_node("period", str(self.period)))
if self.emulatorpin is not None:
root.append(self.emulatorpin.format_dom())
for vcpu in self.vcpupin:
root.append(vcpu.format_dom())
return root
class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBacking, self).__init__(
root_name="memoryBacking", **kwargs)
self.hugepages = []
self.sharedpages = True
self.locked = False
def format_dom(self):
root = super(LibvirtConfigGuestMemoryBacking, self).format_dom()
if self.hugepages:
hugepages = etree.Element("hugepages")
for item in self.hugepages:
hugepages.append(item.format_dom())
root.append(hugepages)
if not self.sharedpages:
root.append(etree.Element("nosharedpages"))
if self.locked:
root.append(etree.Element("locked"))
return root
class LibvirtConfigGuestMemoryBackingPage(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBackingPage, self).__init__(
root_name="page", **kwargs)
self.size_kb = None
self.nodeset = None
def format_dom(self):
page = super(LibvirtConfigGuestMemoryBackingPage, self).format_dom()
page.set("size", str(self.size_kb))
page.set("nodeset", hardware.format_cpu_spec(self.nodeset))
page.set("unit", "KiB")
return page
class LibvirtConfigGuestMemoryTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryTune, self).__init__(
root_name="memtune", **kwargs)
self.hard_limit = None
self.soft_limit = None
self.swap_hard_limit = None
self.min_guarantee = None
def format_dom(self):
root = super(LibvirtConfigGuestMemoryTune, self).format_dom()
if self.hard_limit is not None:
root.append(self._text_node("hard_limit",
str(self.hard_limit),
units="K"))
if self.soft_limit is not None:
root.append(self._text_node("soft_limit",
str(self.soft_limit),
units="K"))
if self.swap_hard_limit is not None:
root.append(self._text_node("swap_hard_limit",
str(self.swap_hard_limit),
units="K"))
if self.min_guarantee is not None:
root.append(self._text_node("min_guarantee",
str(self.min_guarantee),
units="K"))
return root
class LibvirtConfigGuestNUMATuneMemory(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemory, self).__init__(
root_name="memory", **kwargs)
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemory, self).format_dom()
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATuneMemNode(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemNode, self).__init__(
root_name="memnode", **kwargs)
self.cellid = 0
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemNode, self).format_dom()
root.set("cellid", str(self.cellid))
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATune, self).__init__(
root_name="numatune", **kwargs)
self.memory = None
self.memnodes = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATune, self).format_dom()
if self.memory is not None:
root.append(self.memory.format_dom())
for node in self.memnodes:
root.append(node.format_dom())
return root
class LibvirtConfigGuestFeature(LibvirtConfigObject):
def __init__(self, name, **kwargs):
super(LibvirtConfigGuestFeature, self).__init__(root_name=name,
**kwargs)
class LibvirtConfigGuestFeatureACPI(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureACPI, self).__init__("acpi",
**kwargs)
class LibvirtConfigGuestFeatureAPIC(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureAPIC, self).__init__("apic",
**kwargs)
class LibvirtConfigGuestFeaturePAE(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeaturePAE, self).__init__("pae",
**kwargs)
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
# QEMU requires at least this value to be set
MIN_SPINLOCK_RETRIES = 4095
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureHyperV, self).__init__("hyperv",
**kwargs)
self.relaxed = False
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
def format_dom(self):
root = super(LibvirtConfigGuestFeatureHyperV, self).format_dom()
if self.relaxed:
root.append(etree.Element("relaxed", state="on"))
if self.vapic:
root.append(etree.Element("vapic", state="on"))
if self.spinlocks:
root.append(etree.Element("spinlocks", state="on",
retries=str(self.spinlock_retries)))
return root
class LibvirtConfigGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuest, self).__init__(root_name="domain",
**kwargs)
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 500 * units.Mi
self.membacking = None
self.memtune = None
self.numatune = None
self.vcpus = 1
self.cpuset = None
self.cpu = None
self.cputune = None
self.features = []
self.clock = None
self.sysinfo = None
self.os_type = None
self.os_loader = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
self.os_root = None
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
self.os_mach_type = None
self.devices = []
self.metadata = []
self.idmaps = []
def _format_basic_props(self, root):
root.append(self._text_node("uuid", self.uuid))
root.append(self._text_node("name", self.name))
root.append(self._text_node("memory", self.memory))
if self.membacking is not None:
root.append(self.membacking.format_dom())
if self.memtune is not None:
root.append(self.memtune.format_dom())
if self.numatune is not None:
root.append(self.numatune.format_dom())
if self.cpuset is not None:
vcpu = self._text_node("vcpu", self.vcpus)
vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
root.append(vcpu)
else:
root.append(self._text_node("vcpu", self.vcpus))
if len(self.metadata) > 0:
metadata = etree.Element("metadata")
for m in self.metadata:
metadata.append(m.format_dom())
root.append(metadata)
def _format_os(self, root):
os = etree.Element("os")
type_node = self._text_node("type", self.os_type)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
if self.os_loader is not None:
os.append(self._text_node("loader", self.os_loader))
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
os.append(self._text_node("cmdline", self.os_cmdline))
if self.os_root is not None:
os.append(self._text_node("root", self.os_root))
if self.os_init_path is not None:
os.append(self._text_node("init", self.os_init_path))
for boot_dev in self.os_boot_dev:
os.append(etree.Element("boot", dev=boot_dev))
if self.os_smbios is not None:
os.append(self.os_smbios.format_dom())
root.append(os)
def _format_features(self, root):
if len(self.features) > 0:
features = etree.Element("features")
for feat in self.features:
features.append(feat.format_dom())
root.append(features)
def _format_devices(self, root):
if len(self.devices) == 0:
return
devices = etree.Element("devices")
for dev in self.devices:
devices.append(dev.format_dom())
root.append(devices)
def _format_idmaps(self, root):
if len(self.idmaps) == 0:
return
idmaps = etree.Element("idmap")
for idmap in self.idmaps:
idmaps.append(idmap.format_dom())
root.append(idmaps)
def format_dom(self):
root = super(LibvirtConfigGuest, self).format_dom()
root.set("type", self.virt_type)
self._format_basic_props(root)
if self.sysinfo is not None:
root.append(self.sysinfo.format_dom())
self._format_os(root)
self._format_features(root)
if self.cputune is not None:
root.append(self.cputune.format_dom())
if self.clock is not None:
root.append(self.clock.format_dom())
if self.cpu is not None:
root.append(self.cpu.format_dom())
self._format_devices(root)
self._format_idmaps(root)
return root
def parse_dom(self, xmldoc):
        # Note: This covers only: LibvirtConfigGuestDisk
        #                         LibvirtConfigGuestHostdevPCI
        #                         LibvirtConfigGuestUidMap
        #                         LibvirtConfigGuestGidMap
        #                         LibvirtConfigGuestCPU
for c in xmldoc.getchildren():
if c.tag == 'devices':
for d in c.getchildren():
if d.tag == 'disk':
obj = LibvirtConfigGuestDisk()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'pci':
obj = LibvirtConfigGuestHostdevPCI()
obj.parse_dom(d)
self.devices.append(obj)
if c.tag == 'idmap':
for map in c.getchildren():
obj = None
if map.tag == 'uid':
obj = LibvirtConfigGuestUIDMap()
elif map.tag == 'gid':
obj = LibvirtConfigGuestGIDMap()
if obj:
obj.parse_dom(map)
self.idmaps.append(obj)
elif c.tag == 'cpu':
obj = LibvirtConfigGuestCPU()
obj.parse_dom(c)
self.cpu = obj
def add_device(self, dev):
self.devices.append(dev)
def set_clock(self, clk):
self.clock = clk
class LibvirtConfigGuestSnapshot(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshot, self).__init__(
root_name="domainsnapshot",
**kwargs)
self.name = None
self.disks = []
def format_dom(self):
ss = super(LibvirtConfigGuestSnapshot, self).format_dom()
if self.name:
ss.append(self._text_node("name", self.name))
disks = etree.Element('disks')
for disk in self.disks:
disks.append(disk.format_dom())
ss.append(disks)
return ss
def add_disk(self, disk):
self.disks.append(disk)
class LibvirtConfigNodeDevice(LibvirtConfigObject):
"""Libvirt Node Devices parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevice, self).__init__(root_name="device",
**kwargs)
self.name = None
self.parent = None
self.driver = None
self.pci_capability = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "name":
self.name = c.text
elif c.tag == "parent":
self.parent = c.text
elif c.tag == "capability" and c.get("type") == 'pci':
pcicap = LibvirtConfigNodeDevicePciCap()
pcicap.parse_dom(c)
self.pci_capability = pcicap
class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
"""Libvirt Node Devices pci capability parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciCap, self).__init__(
root_name="capability", **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
self.product = None
self.product_id = None
self.vendor = None
self.vendor_id = None
self.numa_node = None
self.fun_capability = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "domain":
self.domain = int(c.text)
elif c.tag == "slot":
self.slot = int(c.text)
elif c.tag == "bus":
self.bus = int(c.text)
elif c.tag == "function":
self.function = int(c.text)
elif c.tag == "product":
self.product = c.text
self.product_id = int(c.get('id'), 16)
elif c.tag == "vendor":
self.vendor = c.text
self.vendor_id = int(c.get('id'), 16)
elif c.tag == "numa":
self.numa_node = int(c.get('node'))
elif c.tag == "capability" and c.get('type') in \
('virt_functions', 'phys_function'):
funcap = LibvirtConfigNodeDevicePciSubFunctionCap()
funcap.parse_dom(c)
self.fun_capability.append(funcap)
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.device_addrs = list() # list of tuple (domain,bus,slot,function)
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc)
self.type = xmldoc.get("type")
for c in xmldoc.getchildren():
if c.tag == "address":
self.device_addrs.append((int(c.get('domain'), 16),
int(c.get('bus'), 16),
int(c.get('slot'), 16),
int(c.get('function'), 16)))
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
**kwargs)
self.model = 'random'
self.backend = None
self.rate_period = None
self.rate_bytes = None
def format_dom(self):
dev = super(LibvirtConfigGuestRng, self).format_dom()
dev.set('model', 'virtio')
backend = etree.Element("backend")
backend.set("model", self.model)
backend.text = self.backend
if self.rate_period and self.rate_bytes:
rate = etree.Element("rate")
rate.set("period", str(self.rate_period))
rate.set("bytes", str(self.rate_bytes))
dev.append(rate)
dev.append(backend)
return dev
class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaInstance,
self).__init__(root_name="instance",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.package = None
self.flavor = None
self.name = None
self.creationTime = None
self.owner = None
self.roottype = None
self.rootid = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()
pkg = self._new_node("package")
pkg.set("version", self.package)
meta.append(pkg)
if self.name is not None:
meta.append(self._text_node("name", self.name))
if self.creationTime is not None:
timestr = time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(self.creationTime))
meta.append(self._text_node("creationTime", timestr))
if self.flavor is not None:
meta.append(self.flavor.format_dom())
if self.owner is not None:
meta.append(self.owner.format_dom())
if self.roottype is not None and self.rootid is not None:
root = self._new_node("root")
root.set("type", self.roottype)
root.set("uuid", str(self.rootid))
meta.append(root)
return meta
class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaFlavor,
self).__init__(root_name="flavor",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.name = None
self.memory = None
self.disk = None
self.swap = None
self.ephemeral = None
self.vcpus = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
meta.set("name", self.name)
if self.memory is not None:
meta.append(self._text_node("memory", str(self.memory)))
if self.disk is not None:
meta.append(self._text_node("disk", str(self.disk)))
if self.swap is not None:
meta.append(self._text_node("swap", str(self.swap)))
if self.ephemeral is not None:
meta.append(self._text_node("ephemeral", str(self.ephemeral)))
if self.vcpus is not None:
meta.append(self._text_node("vcpus", str(self.vcpus)))
return meta
class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaOwner,
self).__init__(root_name="owner",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.userid = None
self.username = None
self.projectid = None
self.projectname = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
if self.userid is not None and self.username is not None:
user = self._text_node("user", self.username)
user.set("uuid", self.userid)
meta.append(user)
if self.projectid is not None and self.projectname is not None:
project = self._text_node("project", self.projectname)
project.set("uuid", self.projectid)
meta.append(project)
return meta
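# Illustrative sketch (not part of Nova): build a minimal guest definition with a
# file-backed serial console and render it with format_dom(). Assumes the imports
# at the top of this module (etree, units); `_example_minimal_guest_xml` is a
# hypothetical helper name used only for illustration.
def _example_minimal_guest_xml():
    guest = LibvirtConfigGuest()
    guest.virt_type = "kvm"
    guest.uuid = "11111111-2222-3333-4444-555555555555"
    guest.name = "demo"
    guest.os_type = "hvm"
    console = LibvirtConfigGuestSerial()
    console.type = "file"
    console.source_path = "/var/log/demo-console.log"
    guest.add_device(console)
    # returns the <domain type='kvm'>...</domain> XML as bytes
    return etree.tostring(guest.format_dom())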
|
|
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
n_jobs=1):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
        Should be in the interval [0, 1];
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
    random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
X = check_array(X)
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile),
n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
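# Illustrative usage sketch (not part of scikit-learn): estimate a bandwidth for
# two well-separated synthetic blobs. Uses the module-level `np` import above;
# `_example_estimate_bandwidth` is a hypothetical name used only for illustration.
def _example_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 10])
    # quantile=0.2 averages, over all points, the distance to the point's
    # 20% nearest neighbours (here 40 of the 200 samples)
    return estimate_bandwidth(X, quantile=0.2, random_state=0)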
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (np.linalg.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
        Maximum number of iterations, per seed point, before the clustering
        operation terminates (for that seed point) if it has not converged yet.
n_jobs : int
        The number of jobs to use for the computation. This works by running
        the hill climbing for each seed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
For an example, see :ref:`examples/cluster/plot_mean_shift.py
<sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth,
n_jobs=n_jobs).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
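# Illustrative sketch (not part of scikit-learn): bin five 1-D points onto a grid
# of size 1.0 and keep only bins holding at least two points. Uses the
# module-level `np` import; `_example_get_bin_seeds` is a hypothetical name.
def _example_get_bin_seeds():
    X = np.array([[0.1], [0.2], [0.9], [1.1], [5.0]])
    # 0.1 and 0.2 fall in bin 0, 0.9 and 1.1 in bin 1, 5.0 is alone and dropped,
    # so the returned seeds are [[0.], [1.]]
    return get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)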
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
        The number of jobs to use for the computation. This works by running
        the hill climbing for each seed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
    labels_ : array, shape=[n_samples]
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
y : Ignored
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
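# Illustrative usage sketch (not part of scikit-learn): fit MeanShift on two
# synthetic blobs and assign a new point to the nearest centre. Uses the
# module-level `np` import; `_example_mean_shift` is a hypothetical name.
def _example_mean_shift():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + [10, 10]])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True).fit(X)
    # cluster_centers_ has one row per blob; predict() uses the nearest centre
    return ms.cluster_centers_, ms.predict([[9.5, 10.2]])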
|
|
"""
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 16.0.0)
* cryptography (minimum 1.3.4, from pyopenssl)
* idna (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
"""
from __future__ import absolute_import
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
try:
from cryptography.x509 import UnsupportedExtension
except ImportError:
# UnsupportedExtension is gone in cryptography >= 2.1.0
class UnsupportedExtension(Exception):
pass
from socket import timeout, error as SocketError
from io import BytesIO
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
from ..packages import six
import sys
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI always works.
HAS_SNI = True
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
_openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
_openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_stdlib_to_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED:
OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
_openssl_to_stdlib_verify = dict(
(v, k) for k, v in _stdlib_to_openssl_verify.items()
)
# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
log = logging.getLogger(__name__)
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
_validate_dependencies_met()
util.ssl_.SSLContext = PyOpenSSLContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_PYOPENSSL = True
util.ssl_.IS_PYOPENSSL = True
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_PYOPENSSL = False
util.ssl_.IS_PYOPENSSL = False
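# Illustrative usage sketch (not part of urllib3): enable the PyOpenSSL backend,
# perform one HTTPS request, then restore the stdlib backend. Assumes urllib3,
# pyOpenSSL, cryptography and idna are installed; `_example_inject_roundtrip`
# is a hypothetical name used only for illustration.
def _example_inject_roundtrip():
    import urllib3
    import urllib3.contrib.pyopenssl as pyopenssl
    pyopenssl.inject_into_urllib3()
    try:
        http = urllib3.PoolManager()
        return http.request("GET", "https://example.org").status
    finally:
        # always undo the monkey-patch so later code sees the default backend
        pyopenssl.extract_from_urllib3()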
def _validate_dependencies_met():
"""
Verifies that PyOpenSSL's package-level dependencies have been met.
Throws `ImportError` if they are not met.
"""
# Method added in `cryptography==1.1`; not available in older versions
from cryptography.x509.extensions import Extensions
if getattr(Extensions, "get_extension_for_class", None) is None:
raise ImportError("'cryptography' module missing required functionality. "
"Try upgrading to v1.3.4 or newer.")
# pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
# attribute is only present on those versions.
from OpenSSL.crypto import X509
x509 = X509()
if getattr(x509, "_x509", None) is None:
raise ImportError("'pyOpenSSL' module missing required functionality. "
"Try upgrading to v0.14 or newer.")
def _dnsname_to_stdlib(name):
"""
Converts a dNSName SubjectAlternativeName field to the form used by the
standard library on the given Python version.
Cryptography produces a dNSName as a unicode string that was idna-decoded
from ASCII bytes. We need to idna-encode that string to get it back, and
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
If the name cannot be idna-encoded then we return None signalling that
the name given should be skipped.
"""
def idna_encode(name):
"""
Borrowed wholesale from the Python Cryptography Project. It turns out
that we can't just safely call `idna.encode`: it can explode for
wildcard names. This avoids that problem.
"""
import idna
try:
for prefix in [u'*.', u'.']:
if name.startswith(prefix):
name = name[len(prefix):]
return prefix.encode('ascii') + idna.encode(name)
return idna.encode(name)
except idna.core.IDNAError:
return None
if ':' in name:
return name
name = idna_encode(name)
if name is None:
return None
elif sys.version_info >= (3, 0):
name = name.decode('utf-8')
return name
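# Illustrative sketch (not part of urllib3): the conversion above keeps a "*."
# wildcard prefix as ASCII and IDNA-encodes the remainder; on Python 3 the
# result is decoded back to text. Assumes `idna` is installed;
# `_example_dnsname_to_stdlib` is a hypothetical helper name.
def _example_dnsname_to_stdlib():
    # u"*.b\u00fccher.example" -> u"*.xn--bcher-kva.example" on Python 3
    return _dnsname_to_stdlib(u"*.b\u00fccher.example")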
def get_subj_alt_name(peer_cert):
"""
    Given a PyOpenSSL certificate, provides all the subject alternative names.
"""
# Pass the cert to cryptography, which has much better APIs for this.
if hasattr(peer_cert, "to_cryptography"):
cert = peer_cert.to_cryptography()
else:
# This is technically using private APIs, but should work across all
# relevant versions before PyOpenSSL got a proper API for this.
cert = _Certificate(openssl_backend, peer_cert._x509)
# We want to find the SAN extension. Ask Cryptography to locate it (it's
# faster than looping in Python)
try:
ext = cert.extensions.get_extension_for_class(
x509.SubjectAlternativeName
).value
except x509.ExtensionNotFound:
# No such extension, return the empty list.
return []
except (x509.DuplicateExtension, UnsupportedExtension,
x509.UnsupportedGeneralNameType, UnicodeError) as e:
# A problem has been found with the quality of the certificate. Assume
# no SAN field is present.
log.warning(
"A problem was encountered with the certificate that prevented "
"urllib3 from finding the SubjectAlternativeName field. This can "
"affect certificate validation. The error was %s",
e,
)
return []
# We want to return dNSName and iPAddress fields. We need to cast the IPs
# back to strings because the match_hostname function wants them as
# strings.
# Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
# decoded. This is pretty frustrating, but that's what the standard library
# does with certificates, and so we need to attempt to do the same.
# We also want to skip over names which cannot be idna encoded.
names = [
('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
if name is not None
]
names.extend(
('IP Address', str(name))
for name in ext.get_values_for_type(x509.IPAddress)
)
return names
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
self._closed = False
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def recv_into(self, *args, **kwargs):
try:
return self.connection.recv_into(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return 0
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return 0
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout('The read operation timed out')
else:
return self.recv_into(*args, **kwargs)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
if not util.wait_for_write(self.socket, self.socket.gettimeout()):
raise timeout()
continue
except OpenSSL.SSL.SysCallError as e:
raise SocketError(str(e))
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
def shutdown(self):
# FIXME rethrow compatible exceptions should we ever use this
self.connection.shutdown()
def close(self):
if self._makefile_refs < 1:
try:
self._closed = True
return self.connection.close()
except OpenSSL.SSL.Error:
return
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': get_subj_alt_name(x509)
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
makefile = backport_makefile
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
"""
I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
for translating the interface of the standard library ``SSLContext`` object
to calls into PyOpenSSL.
"""
def __init__(self, protocol):
self.protocol = _openssl_versions[protocol]
self._ctx = OpenSSL.SSL.Context(self.protocol)
self._options = 0
self.check_hostname = False
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
self._ctx.set_options(value)
@property
def verify_mode(self):
return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
@verify_mode.setter
def verify_mode(self, value):
self._ctx.set_verify(
_stdlib_to_openssl_verify[value],
_verify_callback
)
def set_default_verify_paths(self):
self._ctx.set_default_verify_paths()
def set_ciphers(self, ciphers):
if isinstance(ciphers, six.text_type):
ciphers = ciphers.encode('utf-8')
self._ctx.set_cipher_list(ciphers)
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
if cafile is not None:
cafile = cafile.encode('utf-8')
if capath is not None:
capath = capath.encode('utf-8')
self._ctx.load_verify_locations(cafile, capath)
if cadata is not None:
self._ctx.load_verify_locations(BytesIO(cadata))
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._ctx.use_certificate_chain_file(certfile)
if password is not None:
self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
self._ctx.use_privatekey_file(keyfile or certfile)
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True, suppress_ragged_eofs=True,
server_hostname=None):
cnx = OpenSSL.SSL.Connection(self._ctx, sock)
if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
server_hostname = server_hostname.encode('utf-8')
if server_hostname is not None:
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(sock, sock.gettimeout()):
raise timeout('select timed out')
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake: %r' % e)
break
return WrappedSocket(cnx, sock)
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
|
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import numpy as np
import operator
from fate_arch.federation import segment_transfer_enabled
from federatedml.secureprotol.encrypt import Encrypt
from federatedml.util import LOGGER
class TransferableWeights(metaclass=segment_transfer_enabled()):
def __init__(self, weights, cls, *args, **kwargs):
self._weights = weights
self._cls = cls
if args:
self._args = args
if kwargs:
self._kwargs = kwargs
def with_degree(self, degree):
setattr(self, "_degree", degree)
return self
def get_degree(self, default=None):
return getattr(self, "_degree", default)
@property
def unboxed(self):
return self._weights
@property
def weights(self):
if not hasattr(self, "_args") and not hasattr(self, "_kwargs"):
return self._cls(self._weights)
else:
args = self._args if hasattr(self, "_args") else ()
kwargs = self._kwargs if hasattr(self, "_kwargs") else {}
return self._cls(self._weights, *args, **kwargs)
class Weights(metaclass=segment_transfer_enabled()):
def __init__(self, l):
self._weights = l
def for_remote(self):
return TransferableWeights(self._weights, self.__class__)
@property
def unboxed(self):
return self._weights
@abc.abstractmethod
def map_values(self, func, inplace):
pass
@abc.abstractmethod
def binary_op(self, other, func, inplace):
pass
@abc.abstractmethod
def axpy(self, a, y):
pass
def decrypted(self, cipher: Encrypt, inplace=True):
return self.map_values(cipher.decrypt, inplace=inplace)
def encrypted(self, cipher: Encrypt, inplace=True):
return self.map_values(cipher.encrypt, inplace=inplace)
def __imul__(self, other):
return self.map_values(lambda x: x * other, inplace=True)
def __mul__(self, other):
return self.map_values(lambda x: x * other, inplace=False)
def __iadd__(self, other):
return self.binary_op(other, operator.add, inplace=True)
def __add__(self, other):
LOGGER.debug("In binary_op0, _w: {}".format(self._weights))
return self.binary_op(other, operator.add, inplace=False)
def __isub__(self, other):
return self.binary_op(other, operator.sub, inplace=True)
def __sub__(self, other):
return self.binary_op(other, operator.sub, inplace=False)
def __truediv__(self, other):
return self.map_values(lambda x: x / other, inplace=False)
def __itruediv__(self, other):
return self.map_values(lambda x: x / other, inplace=True)
class NumericWeights(Weights):
def __init__(self, v):
super().__init__(v)
def map_values(self, func, inplace):
v = func(self._weights)
if inplace:
self._weights = v
return self
else:
return NumericWeights(v)
    def binary_op(self, other: 'NumericWeights', func, inplace):
v = func(self._weights, other._weights)
if inplace:
self._weights = v
return self
else:
return NumericWeights(v)
    def axpy(self, a, y: 'NumericWeights'):
self._weights = self._weights + a * y._weights
return self
class ListWeights(Weights):
def __init__(self, l):
super().__init__(l)
def map_values(self, func, inplace):
if inplace:
for k, v in enumerate(self._weights):
self._weights[k] = func(v)
return self
else:
_w = []
for v in self._weights:
_w.append(func(v))
return ListWeights(_w)
def binary_op(self, other: 'ListWeights', func, inplace):
if inplace:
for k, v in enumerate(self._weights):
self._weights[k] = func(self._weights[k], other._weights[k])
return self
else:
_w = []
for k, v in enumerate(self._weights):
_w.append(func(self._weights[k], other._weights[k]))
return ListWeights(_w)
def axpy(self, a, y: 'ListWeights'):
for k, v in enumerate(self._weights):
self._weights[k] += a * y._weights[k]
return self
class DictWeights(Weights):
def __init__(self, d):
super().__init__(d)
def map_values(self, func, inplace):
if inplace:
for k, v in self._weights.items():
self._weights[k] = func(v)
return self
else:
_w = dict()
for k, v in self._weights.items():
_w[k] = func(v)
return DictWeights(_w)
def binary_op(self, other: 'DictWeights', func, inplace):
if inplace:
for k, v in self._weights.items():
self._weights[k] = func(other._weights[k], v)
return self
else:
_w = dict()
for k, v in self._weights.items():
_w[k] = func(other._weights[k], v)
return DictWeights(_w)
def axpy(self, a, y: 'DictWeights'):
for k, v in self._weights.items():
self._weights[k] += a * y._weights[k]
return self
class OrderDictWeights(Weights):
"""
    This class provides a dict container, like `DictWeights`, but with a fixed
    key order. This is useful for random padding generation in secure
    aggregation, which is order sensitive.
"""
def __init__(self, d):
super().__init__(d)
self.walking_order = sorted(d.keys(), key=str)
def map_values(self, func, inplace):
if inplace:
for k in self.walking_order:
self._weights[k] = func(self._weights[k])
return self
else:
_w = dict()
for k in self.walking_order:
_w[k] = func(self._weights[k])
return OrderDictWeights(_w)
def binary_op(self, other: 'OrderDictWeights', func, inplace):
if inplace:
for k in self.walking_order:
self._weights[k] = func(other._weights[k], self._weights[k])
return self
else:
_w = dict()
for k in self.walking_order:
_w[k] = func(other._weights[k], self._weights[k])
return OrderDictWeights(_w)
def axpy(self, a, y: 'OrderDictWeights'):
for k in self.walking_order:
self._weights[k] += a * y._weights[k]
return self
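# Illustrative sketch (not part of FATE): keys are walked in sorted(str) order,
# so every party iterating the same dict adds its secure-aggregation padding in
# the same order. `_example_order_dict_weights` is a hypothetical helper name.
def _example_order_dict_weights():
    w = OrderDictWeights({"b": 2.0, "a": 1.0, 10: 3.0})
    # sorted by str(key): '10' < 'a' < 'b'
    return w.walking_order  # [10, 'a', 'b']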
class NumpyWeights(Weights):
def __init__(self, arr):
super().__init__(arr)
def map_values(self, func, inplace):
if inplace:
size = self._weights.size
view = self._weights.view().reshape(size)
for i in range(size):
view[i] = func(view[i])
return self
else:
vec_func = np.vectorize(func)
weights = vec_func(self._weights)
return NumpyWeights(weights)
def binary_op(self, other: 'NumpyWeights', func, inplace):
if inplace:
size = self._weights.size
view = self._weights.view().reshape(size)
            view_other = other._weights.view().reshape(size)
for i in range(size):
view[i] = func(view[i], view_other[i])
return self
else:
vec_func = np.vectorize(func)
weights = vec_func(self._weights, other._weights)
return NumpyWeights(weights)
def axpy(self, a, y: 'NumpyWeights'):
size = self._weights.size
view = self._weights.view().reshape(size)
        view_other = y._weights.view().reshape(size)
for i in range(size):
view[i] += a * view_other[i]
return self
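# Illustrative sketch (not part of FATE): the operators defined on Weights
# delegate to map_values/binary_op/axpy, so ListWeights behave like element-wise
# vectors. `_example_list_weights` is a hypothetical helper name.
def _example_list_weights():
    a = ListWeights([1.0, 2.0, 3.0])
    b = ListWeights([0.5, 0.5, 0.5])
    c = (a + b) * 2   # element-wise add, then scale -> [3.0, 5.0, 7.0]
    c.axpy(10, b)     # in-place c += 10 * b          -> [8.0, 10.0, 12.0]
    return c.unboxed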
|
|
# coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Module to write coverage report"""
import os
import pandas as pd
from sequana import bedtools
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils import config
from sequana.utils.datatables_js import DataTable, DataTableFunction
from sequana.plots.canvasjs_linegraph import CanvasJSLineGraph
from sequana.modules_report.summary import SummaryModule
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["CoverageModule", "ChromosomeCoverageModule"]
class CoverageModule(SequanaBaseModule):
"""Write HTML report of coverage analysis. This class takes either a
    :class:`GenomeCov` instance or a csv file where analyses are stored.
"""
def __init__(self, data, region_window=200000):
""".. rubric:: constructor
:param data: it can be a csv filename created by sequana_coverage or a
:class:`bedtools.GenomeCov` object.
:param region_window:
"""
super().__init__()
self.region_window = region_window
if isinstance(data, bedtools.GenomeCov):
self.bed = data
else:
raise TypeError
try:
html_list = self.create_chromosome_reports()
except TypeError:
msg = (
"Data must be either a csv file or a :class:`GenomeCov` "
"instance where zscore is computed."
)
raise TypeError(msg)
self.title = "Main coverage report ({0})".format(config.sample_name)
self.intro = (
"<p>Report the coverage of your sample ({0}) to check the "
"quality of your mapping and to highlight regions of "
"interest (under and over covered).</p>".format(config.sample_name)
)
self.create_report_content(html_list)
self.create_html("sequana_coverage.html")
def create_report_content(self, html_list):
self.sections = list()
self.create_chromosome_table(html_list)
def create_chromosome_table(self, html_list):
"""Create table with links to chromosome reports"""
df = pd.DataFrame(
[
[chrom.chrom_name, chrom.get_size(), chrom.DOC, chrom.CV, page]
for chrom, page in zip(self.bed.chr_list, html_list)
],
columns=["chromosome", "size", "mean_coverage", "coef_variation", "link"],
)
datatable = DataTable(df, "chrom")
datatable.datatable.datatable_options = {
"pageLength": 15,
"dom": "Bfrtip",
"buttons": ["copy", "csv"],
}
datatable.datatable.set_links_to_column("link", "chromosome")
js = datatable.create_javascript_function()
html_table = datatable.create_datatable(float_format="%.3g")
self.sections.append(
{
"name": "Chromosomes",
"anchor": "chromosomes",
"content": "<p>Link to coverage analysis report for each chromosome. "
"Size, mean coverage and coefficient of variation are reported"
" in the table below.</p>\n{0}\n{1}".format(js, html_table),
}
)
def create_chromosome_reports(self):
"""Create HTML report for each chromosome present in data."""
# FIXME: why bed[0] (i.e. first chromosome)
datatable_js = CoverageModule.init_roi_datatable(self.bed[0])
chrom_output_dir = config.output_dir
if not os.path.exists(chrom_output_dir):
os.makedirs(chrom_output_dir)
page_list = []
for chrom in self.bed:
logger.info("Creating coverage report {}".format(chrom.chrom_name))
chrom_report = ChromosomeCoverageModule(
chrom, datatable_js, region_window=self.region_window
)
page_list.append(chrom_report.html_page)
return page_list
    # a static method because we need it in the coverage standalone
    # to initiate the datatables
    @staticmethod
    def init_roi_datatable(rois):
"""Initiate :class:`DataTableFunction` to create table to link each
row with sub HTML report. All table will have the same appearance.
We can therefore initialise the roi once for all.
:param rois: can be a ROIs from ChromosomeCov instance or a simple
dataframe
"""
# computed
try:
df = rois.df.copy()
        except AttributeError:
df = rois.copy()
df["link"] = ""
# set datatable options
datatable_js = DataTableFunction(df, "roi")
if "start" in df.columns:
datatable_js.set_links_to_column("link", "start")
if "end" in df.columns:
datatable_js.set_links_to_column("link", "end")
datatable_js.datatable_options = {
"scrollX": "true",
"pageLength": 15,
"scrollCollapse": "true",
"dom": "Bfrtip",
"buttons": ["copy", "csv"],
}
return datatable_js
class ChromosomeCoverageModule(SequanaBaseModule):
"""Write HTML report of coverage analysis for each chromosome. It is
created by CoverageModule.
"""
def __init__(
self, chromosome, datatable, region_window=200000, options=None, command=""
):
"""
:param chromosome:
:param datatable:
:param directory:
:param int region_window: length of the sub coverage plot
:param options: should contain "W", "k", "circular"
"""
super().__init__()
self.region_window = region_window
directory = chromosome.chrom_name
        # define where the css and js files are
if directory in {None, "."}:
self.path = ""
directory = "."
else:
self.path = "../"
self.chromosome = chromosome
self.datatable = datatable
self.command = command
self.command += "\nSequana version: {}".format(config.version)
self.title = "Coverage analysis of chromosome {0}".format(
self.chromosome.chrom_name
)
self.intro = (
"<p>The genome coverage analysis of the chromosome "
"<b>{0}</b>.</p>".format(self.chromosome.chrom_name)
)
self.create_report_content(directory, options=options)
self.html_page = "{0}{1}{2}.cov.html".format(
directory, os.sep, self.chromosome.chrom_name
)
self.create_html(self.html_page)
def create_report_content(self, directory, options=None):
"""Generate the sections list to fill the HTML report."""
self.sections = list()
if self.chromosome._mode == "memory":
# nothing to do, all computations should be already available
# and in memory
rois = self.chromosome.get_rois()
elif self.chromosome._mode == "chunks":
# we need to reset the data to the first chunk
# and compute the median and zscore. So, first, we save the entire
# data set
# self.chromosome.reset()
# self.chromosome.running_median(options['W'], circular=options['circular'])
# self.chromosome.compute_zscore(options['k'])
            # We must set the ROIs manually
rois = options["ROIs"]
if self.chromosome.DOC > 0:
self.coverage_plot()
if self.chromosome._mode == "memory":
links = self.subcoverage(rois, directory)
else:
links = None
else:
links = None
self.basic_stats()
if self.chromosome.DOC:
self.regions_of_interest(rois, links)
self.coverage_barplot()
if "gc" in self.chromosome.df.columns:
self.gc_vs_coverage()
self.normalized_coverage()
self.zscore_distribution()
self.add_command()
def add_command(self):
self.sections.append(
{
"name": "Command",
"anchor": "command",
"content": ("<p>Command used: <pre>{}</pre>.</p>".format(self.command)),
}
)
def _add_large_data_section(self, name, anchor):
self.sections.append(
{
"name": name,
"anchor": anchor,
"content": (
"<p>Large data sets (--chunk-size argument "
"used), skipped plotting.</p>"
),
}
)
def coverage_plot(self):
"""Coverage section."""
if self.chromosome._mode == "chunks":
self._add_large_data_section("Coverage", "coverage")
return
image = self.create_embedded_png(
self.chromosome.plot_coverage, input_arg="filename"
)
self.sections.append(
{
"name": "Coverage",
"anchor": "coverage",
"content": "<p>The following figures shows the per-base coverage along the"
" reference genome (black line). The blue line indicates the "
"running median. From the normalised coverage, we estimate "
"z-scores on a per-base level. The red lines indicates the "
"z-scores at plus or minus N standard deviations, where N is "
"chosen by the user. (default:4). Only a million point are "
"shown. This may explain some visual discrepancies with. </p>\n{0}".format(
image
),
}
)
def coverage_barplot(self):
"""Coverage barplots section."""
if self.chromosome._mode == "chunks":
self._add_large_data_section("Coverage histogram", "cov_barplot")
return
image1 = self.create_embedded_png(
self.chromosome.plot_hist_coverage, input_arg="filename", style="width:45%"
)
image2 = self.create_embedded_png(
self.chromosome.plot_hist_coverage,
input_arg="filename",
style="width:45%",
logx=False,
)
self.sections.append(
{
"name": "Coverage histogram",
"anchor": "cov_barplot",
"content": "<p>The following figures contain the histogram of the genome "
"coverage. The X and Y axis being in log scale in the left panel"
"while only the Y axis is in log scale in the right panel.</p>\n"
"{0}\n{1}".format(image1, image2),
}
)
def subcoverage(self, rois, directory):
"""Create subcoverage reports to have access to a zoomable line plot.
:param rois: the regions of interest (ROIs) for this chromosome
:param directory: directory name for the chromosome
This method creates sub reports for each region of 200,000 bases (can be
changed). Usually, it starts at position 0, so for a genome of 2,300,000
bases the reports will be stored in e.g.::
chromosome_name/chromosome_name_0_200000.html
chromosome_name/chromosome_name_200000_400000.html
...
...
chromosome_name/chromosome_name_2000000_2200000.html
chromosome_name/chromosome_name_2200000_2300000.html
Note that if the BED file positions do not start at zero, the file
names reflect the actual positions.
"""
# aliases
W = self.region_window
name = self.chromosome.chrom_name
chrom = self.chromosome
N = len(self.chromosome)
# position does not always start at position zero
shift = self.chromosome.df.pos.iloc[0]
maxpos = self.chromosome.df.pos.iloc[-1]
# create directory
chrom_output_dir = os.sep.join([config.output_dir, str(directory), str(name)])
if not os.path.exists(chrom_output_dir):
os.makedirs(chrom_output_dir)
# create the combobox linking to the different sub coverage reports.
# We need (1) the length of the data and (2) the boundaries, since a
# BED file may not start at position zero but run from POS1>0 to POS2
links = [
"{0}/{0}_{1}_{2}.html".format(name, i, min(i + W, maxpos))
for i in range(shift, shift + N, W)
]
intra_links = (
"{0}_{1}_{2}.html".format(name, i, min(i + W, maxpos))
for i in range(shift, shift + N, W)
)
combobox = self.create_combobox(links, "sub", True)
combobox_intra = self.create_combobox(intra_links, "sub", False)
datatable = self._init_datatable_function(rois)
# break the chromosome into pieces of 200,000 bp (region_window)
for i in range(shift, shift + N, W):
SubCoverageModule(
chrom, rois, combobox_intra, datatable, i, min(i + W, maxpos), directory
)
self.sections.append(
{"name": "Subcoverage", "anchor": "subcoverage", "content": combobox}
)
return links
def _init_datatable_function(self, rois):
"""Initiate :class:`DataTableFunction` to create table to link each
row with sub HTML report. All table will have the same appearance. So,
let's initiate its only once.
"""
datatable_js = DataTableFunction(rois.df, "roi")
datatable_js.datatable_options = {
"scrollX": "true",
"pageLength": 15,
"scrollCollapse": "true",
"dom": "Bfrtip",
"buttons": ["copy", "csv"],
}
return datatable_js
def basic_stats(self):
"""Basics statistics section."""
li = "<li><b>{0}</b> ({1}): {2:.2f}</li>"
stats = self.chromosome.get_stats()
description = {
"BOC": "breadth of coverage: the proportion (in %s) "
+ " of the genome covered by at least one read.",
"CV": "the coefficient of variation.",
"DOC": "the sequencing depth (Depth of Coverage), that is the "
+ "average the genome coverage.",
"MAD": "median of the absolute median deviation defined as median(|X-median(X)|).",
"Median": "Median of the coverage.",
"STD": "standard deviation.",
"GC": "GC content (%)",
}
data = [
li.format(key, description[key], stats[key])
for key in ["BOC", "CV", "DOC", "MAD", "Median", "STD", "GC"]
if key in stats.keys()
]
stats = "<ul>{0}</ul>".format("\n".join(data))
self.sections.append(
{
"name": "Basic stats",
"anchor": "basic_stats",
"content": "<p>Here are some basic statistics about the "
"genome coverage.</p>\n{0}".format(stats),
}
)
def regions_of_interest(self, rois, links):
"""Region of interest section."""
def connect_link(x):
for link in links:
# links are built with "/" separators (see subcoverage), not os.sep
_, x1, x2 = link.rsplit("/")[1].rstrip(".html").rsplit("_", 2)
x1 = int(x1)
x2 = int(x2)
if x >= x1 and x <= x2:
return link
# for the case where the data is fully stored in memory, we must
# find all events !
if self.chromosome._mode == "memory" and self.chromosome.binning == 1:
raise Exception("{} position not in the range of reports".format(x))
if links:
links_list = [connect_link(n) for n in rois.df["start"]]
else:
links_list = [None for n in rois.df["start"]]
rois.df["link"] = links_list
# create datatable
low_roi = rois.get_low_rois()
high_roi = rois.get_high_rois()
datatable = CoverageModule.init_roi_datatable(low_roi)
datatable.set_links_to_column("link", "chr")
js = datatable.create_javascript_function()
lroi = DataTable(low_roi, "lroi", datatable)
hroi = DataTable(high_roi, "hroi", datatable)
html_low_roi = lroi.create_datatable(float_format="%.3g")
html_high_roi = hroi.create_datatable(float_format="%.3g")
rois.df.drop("link", 1, inplace=True)
roi_paragraph = (
"<p>Regions with a z-score {0}er than {1:.2f} and at "
"least one base with a z-score {0}er than {2:.2f} are detected."
"There are {3} {0} regions of interest."
"</p>"
)
low_paragraph = roi_paragraph.format(
"low",
self.chromosome.thresholds.low2,
self.chromosome.thresholds.low,
len(low_roi),
)
high_paragraph = roi_paragraph.format(
"high",
self.chromosome.thresholds.high2,
self.chromosome.thresholds.high,
len(high_roi),
)
self.sections.append(
{
"name": "Regions Of Interest (ROI)",
"anchor": "roi",
"content": "{4}\n"
"<p>The following tables give regions of "
"interest detected by sequana. Here are the definitions of the "
"columns:</p>\n"
"<ul><li>mean_cov: the average of coverage</li>\n"
"<li>mean_rm: the average of running median</li>\n"
"<li>mean_zscore: the average of zscore</li>\n"
"<li>max_zscore: the higher zscore contains in the region</li>"
"</ul>\n"
"<h3>Low coverage region</h3>\n{0}\n{1}\n"
"<h3>High coverage region</h3>\n{2}\n{3}\n".format(
low_paragraph, html_low_roi, high_paragraph, html_high_roi, js
),
}
)
def gc_vs_coverage(self):
"""3 dimensional plot of GC content versus coverage."""
image = self.create_embedded_png(
self.chromosome.plot_gc_vs_coverage, input_arg="filename"
)
corr = self.chromosome.get_gc_correlation()
self.sections.append(
{
"name": "Coverage vs GC content",
"anchor": "cov_vs_gc",
"content": "<p>The correlation coefficient between the coverage and GC "
"content is <b>{0:.3g}</b> with a window size of {1}bp.</p>\n"
"{2}\n"
"<p>Note: the correlation coefficient has to be between -1.0 "
"and 1.0. A coefficient of 0 means no correlation, while a "
"coefficient of -1 or 1 means an existing "
"correlation between GC and Coverage</p>".format(
corr, self.chromosome.bed.gc_window_size, image
),
}
)
def normalized_coverage(self):
"""Barplot of normalized coverage section."""
if self.chromosome._mode == "chunks":
self._add_large_data_section("Normalised coverage", "normalised_coverage")
return
image = self.create_embedded_png(
self.chromosome.plot_hist_normalized_coverage, input_arg="filename"
)
self.sections.append(
{
"name": "Normalised coverage",
"anchor": "normalised_coverage",
"content": "<p>Distribution of the normalised coverage with predicted "
"Gaussian. The red line should be followed the trend of the "
"barplot.</p>\n{0}".format(image),
}
)
def zscore_distribution(self):
"""Barplot of zscore distribution section."""
if self.chromosome._mode == "chunks":
self._add_large_data_section("Z-Score distribution", "zscore_distribution")
return
image = self.create_embedded_png(
self.chromosome.plot_hist_zscore, input_arg="filename"
)
self.sections.append(
{
"name": "Z-Score distribution",
"anchor": "zscore_distribution",
"content": "<p>Distribution of the z-score (normalised coverage); You "
"should see a Gaussian distribution centered around 0. The "
"estimated parameters are mu={0:.2f} and sigma={1:.2f}.</p>\n"
"{2}".format(
self.chromosome.best_gaussian["mu"],
self.chromosome.best_gaussian["sigma"],
image,
),
}
)
class SubCoverageModule(SequanaBaseModule):
"""Write HTML report of subsection of chromosome with a javascript
coverage plot.
"""
def __init__(self, chromosome, rois, combobox, datatable, start, stop, directory):
super().__init__()
if directory == ".":
self.path = "../"
else:
self.path = "../../"
self.chromosome = chromosome
self.rois = rois
self.combobox = combobox
self.datatable = datatable
self.start = start
self.stop = stop
self.title = (
"Coverage analysis of chromosome {0}<br>"
"positions {1} and {2}".format(self.chromosome.chrom_name, start, stop)
)
self.create_report_content()
self.create_html(
"{0}{4}{1}{4}{1}_{2}_{3}.html".format(
directory, self.chromosome.chrom_name, start, stop, os.sep
)
)
def create_report_content(self):
"""Generate the sections list to fill the HTML report."""
self.sections = list()
self.canvasjs_line_plot()
self.regions_of_interest()
def canvasjs_line_plot(self):
"""Create the CanvasJS line plot section."""
# set column of interest and create the csv
x_col = "pos"
y_col = ("cov", "mapq0", "gc")
columns = self.chromosome.df.columns
y_col = [n for n in y_col if n in columns]
csv = self.chromosome.to_csv(
start=self.start,
stop=self.stop,
columns=[x_col] + y_col,
index=False,
float_format="%.3g",
)
# create CanvasJS stuff
cjs = CanvasJSLineGraph(csv, "cov", x_col, y_col)
# set options
cjs.set_options(
{"zoomEnabled": "true", "zoomType": "x", "exportEnabled": "true"}
)
# set title
cjs.set_title("Genome Coverage")
# set legend
cjs.set_legend(
{
"verticalAlign": "bottom",
"horizontalAlign": "center",
"cursor": "pointer",
},
hide_on_click=True,
)
# set axis
cjs.set_axis_x(
{
"title": "Position (bp)",
"labelAngle": 30,
"minimum": self.start,
"maximum": self.stop,
}
)
cjs.set_axis_y({"title": "Coverage (Count)"})
cjs.set_axis_y2(
{
"title": "GC content (ratio)",
"minimum": 0,
"maximum": 1,
"lineColor": "#FFC425",
"titleFontColor": "#FFC425",
"labelFontColor": "#FFC425",
}
)
# set data series
cjs.set_data(
index=0,
data_dict={
"type": "line",
"name": "Coverage",
"showInLegend": "true",
"color": "#5BC0DE",
"lineColor": "#5BC0DE",
},
)
try:
i = y_col.index("mapq0")
cjs.set_data(
index=i,
data_dict={
"type": "line",
"name": "Filtered coverage",
"showInLegend": "true",
"color": "#D9534F",
"lineColor": "#D9534F",
},
)
except ValueError:
pass
try:
i = y_col.index("gc")
cjs.set_data(
index=i,
data_dict={
"type": "line",
"axisYType": "secondary",
"name": "GC content",
"showInLegend": "true",
"color": "#FFC425",
"lineColor": "#FFC425",
},
)
except ValueError:
pass
# create canvasJS
html_cjs = cjs.create_canvasjs()
self.sections.append(
{
"name": "Interactive coverage plot",
"anchor": "iplot",
"content": ("{0}{1}\n".format(self.combobox, html_cjs)),
}
)
def regions_of_interest(self):
"""Region of interest section."""
subseq = [self.start, self.stop]
low_roi = self.rois.get_low_rois(subseq)
high_roi = self.rois.get_high_rois(subseq)
js = self.datatable.create_javascript_function()
lroi = DataTable(low_roi, "lroi", self.datatable)
hroi = DataTable(high_roi, "hroi", self.datatable)
html_low_roi = lroi.create_datatable(float_format="%.3g")
html_high_roi = hroi.create_datatable(float_format="%.3g")
roi_paragraph = (
"<p>Regions with a z-score {0}er than {1:.2f} and at "
"least one base with a z-score {0}er than {2:.2f} are detected as "
"{0} coverage region. Thus, there are {3} {0} coverage regions "
"between the position {4} and the position {5}</p>"
)
low_paragraph = roi_paragraph.format(
"low",
self.chromosome.thresholds.low2,
self.chromosome.thresholds.low,
len(low_roi),
self.start,
self.stop,
)
high_paragraph = roi_paragraph.format(
"high",
self.chromosome.thresholds.high2,
self.chromosome.thresholds.high,
len(high_roi),
self.start,
self.stop,
)
self.sections.append(
{
"name": "Regions Of Interest (ROI)",
"anchor": "roi",
"content": "{4}\n"
"<p>Running median is the median computed along the genome "
"using a sliding window. The following tables give regions of "
"interest detected by sequana. Here are some definition of the "
"table's columns:</p>\n"
"<ul><li><b>mean_cov</b>: the average of coverage</li>\n"
"<li><b>mean_rm</b>: the average of running median</li>\n"
"<li><b>mean_zscore</b>: the average of zscore</li>\n"
"<li><b>max_zscore</b>: the higher zscore contains in the "
"region</li>\n"
"<li><b>log2_ratio</b>:log2(mean_cov/mean_rm)</li></ul>\n"
"<h3>Low coverage region</h3>\n{0}\n{1}\n"
"<h3>High coverage region</h3>\n{2}\n{3}\n".format(
low_paragraph, html_low_roi, high_paragraph, html_high_roi, js
),
}
)
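# Hedged driver sketch (assumption, not from the original source): how the
# reporting classes above might be used once a chromosome coverage object has
# its running median and z-scores computed. Option values are illustrative.
#
#   rois = chromosome.get_rois()
#   datatable = CoverageModule.init_roi_datatable(rois)
#   ChromosomeCoverageModule(
#       chromosome,
#       datatable,
#       region_window=200000,
#       options={"W": 20001, "k": 2, "circular": False, "ROIs": rois},
#       command="<command line used to produce the report>",
#   )
#   # the HTML report is written to <chrom_name>/<chrom_name>.cov.html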
|
|
import sys
import sqlalchemy
class Field(sqlalchemy.Column):
""" Base class for all field types
"""
def __get__(self, obj, obj_type):
if obj is None:
return self
return getattr(obj, self._apply_suffix(self.name), self.default)
def __set__(self, obj, value):
self._check_type(self, value)
setattr(obj, self._apply_suffix(self.name), value)
def __repr__(self):
return self._repr()
def _repr(self, **extra_kw):
kwargs = ['name']
if self.key != self.name:
kwargs.append('key')
if self.primary_key:
kwargs.append('primary_key')
if not self.nullable:
kwargs.append('nullable')
if self.onupdate:
kwargs.append('onupdate')
if self.default:
kwargs.append('default')
if self.server_default:
kwargs.append('server_default')
kwargs = {k: repr(getattr(self, k)) for k in kwargs}
kwargs.update({k: repr(v) for k, v in extra_kw.items()})
args = ['%s=%s' % (k, v) for k, v in kwargs.items()]
cls_name = self.__class__.__name__
return "%s(%s)" % (cls_name, ', '.join(args))
def _check_type(self, obj, value):
python_type = obj.type.python_type
if value is not None and not isinstance(value, python_type):
name = self.name
detail = (
"`%s` should be of type `%s`, " % (name, python_type.__name__),
"it was found to be of type `%s`" % type(value).__name__,
)
detail = ''.join(detail)
raise ValueError(detail)
@staticmethod
def _apply_suffix(raw_str):
return "_prop__%s" % raw_str
class StringField(Field):
"""
Example
-------
Represents VARCHAR in the database.
class Person(BaseModel):
name = StringField()
Most databases also require a length argument:
class Person(BaseModel):
name = StringField(100)
See sqlalchemy.String for further documentation on the arguments
accepted.
"""
def __init__(self, length=None, *args, **kwargs):
if length is None:
_type = sqlalchemy.String
else:
_type = sqlalchemy.String(length)
kwargs['type_'] = _type
super(StringField, self).__init__(**kwargs)
def __repr__(self):
kwargs = {}
length = self.type.length
if length is not None:
kwargs['length'] = length
return self._repr(**kwargs)
def __set__(self, obj, value):
"""Sanitize `value` for Python 2
In Python 2 `str` and `unicode` are different, sqlalchemy reads data
`str` as `unicode` from the DB which fails check.
"""
if sys.version_info < (3, 0):
if isinstance(value, unicode):
value = str(value)
super(StringField, self).__set__(obj, value)
class IntegerField(Field):
"""
Represents int in the database.
See sqlalchemy.Integer for further documentation
"""
def __init__(self, *args, **kwargs):
kwargs['type_'] = sqlalchemy.Integer
super(IntegerField, self).__init__(**kwargs)
class BooleanField(Field):
"""
Represents bool in the database.
See sqlalchemy.Boolean for further information.
"""
def __init__(self, *args, **kwargs):
kwargs['type_'] = sqlalchemy.Boolean
super(BooleanField, self).__init__(**kwargs)
class FloatField(Field):
"""
Represents float or real field in the database
See sqlalchemy.Float for further documentation
"""
def __init__(self, *args, **kwargs):
kwargs['type_'] = sqlalchemy.Float
super(FloatField, self).__init__(**kwargs)
class DateTimeField(Field):
"""
Represents a datetime field in the database.
See sqlalchemy.DateTime for further documentation
"""
def __init__(self, *args, **kwargs):
kwargs['type_'] = sqlalchemy.DateTime
super(DateTimeField, self).__init__(**kwargs)
class ReferenceField(sqlalchemy.ForeignKeyConstraint):
"""
Represents ForeignKey constraint.
>>> class Parent(Model):
name = StringField()
>>> class Child(Model):
name = StringField()
parent = ReferenceField(Parent)
To access:
>>> parent = Parent(name='parent')
parent.save()
>>> child = Child(name='child', parent=parent)
child.save()
`print(child.parent.name)` should output 'parent'
See sqlalchemy.ForeignKeyConstraint for further documentation.
"""
def __init__(self, reference, *args, **kwargs):
self.reference = reference
ref_name = reference.__name__
ref_fk_columns = reference.get_key_name()
columns = list(self.get_fk_columns().keys())
refcolumns = ['%s.%s' % (ref_name, c) for c in ref_fk_columns]
super(ReferenceField, self).__init__(columns, refcolumns, **kwargs)
def __get__(self, obj, obj_type):
if obj is None:
return self
cache_name = 'cache_%s' % self.name
if hasattr(obj, cache_name):
return getattr(obj, cache_name)
reference = self.reference
query = reference.select()
columns = self.get_fk_columns().keys()
fk_columns = reference.get_key_name()
for column, fk_column in zip(columns, fk_columns):
value = getattr(obj, column)
query.where(getattr(reference, fk_column) == value)
_object = query.get()
setattr(obj, cache_name, _object)
return _object
def __set__(self, obj, value):
columns = self.get_fk_columns().keys()
fk_columns = self.reference.get_key_name()
for column, fk_column in zip(columns, fk_columns):
setattr(obj, column, getattr(value, fk_column))
def get_fk_columns(self, reference=None):
"""
reference:
The reference model. If reference is None, self.reference
will be used.
"""
reference = reference or self.reference
columns = reference.get_columns()
reference_name = reference.__name__.lower()
return {
self.apply_prefix(reference_name, c): columns[c].__class__()
for c in reference.get_key_name()
}
@staticmethod
def apply_prefix(*args):
return "fk_%s" % '_'.join(args)
|
|
import json
from normality import ascii_text
from pprint import pprint # noqa
from aleph.core import url_for, es, es_index, schemata
from aleph.index import TYPE_ENTITY, TYPE_DOCUMENT
from aleph.search.util import execute_basic
from aleph.search.fragments import match_all, filter_query, multi_match
from aleph.search.fragments import add_filter, aggregate, authz_filter
from aleph.search.facet import parse_facet_result
DEFAULT_FIELDS = ['collection_id', 'roles', 'dataset', 'name', 'data',
'countries', 'schema', 'schemata', 'properties',
'fingerprints', 'state']
def entities_query(state, fields=None, facets=True, doc_counts=False):
"""Parse a user query string, compose and execute a query."""
if state.has_text:
q = {
"query_string": {
"query": state.text,
"fields": ['name^5', 'names^2', 'text'],
"default_operator": "AND",
"use_dis_max": True
}
}
else:
q = match_all()
if state.raw_query:
q = {"bool": {"must": [q, state.raw_query]}}
q = authz_filter(q, state.authz, roles=True)
aggs = {'scoped': {'global': {}, 'aggs': {}}}
if facets:
facets = list(state.facet_names)
if 'collections' in facets:
aggs = facet_collections(state, q, aggs)
facets.remove('collections')
aggs = aggregate(state, q, aggs, facets)
if state.sort == 'doc_count':
sort = [{'doc_count': 'desc'}, '_score']
elif state.sort == 'score':
sort = ['_score', {'name_sort': 'asc'}]
else:
sort = [{'name_sort': 'asc'}]
# pprint(q)
q = {
'sort': sort,
'query': filter_query(q, state.filters),
'aggregations': aggs,
'size': state.limit,
'from': state.offset,
'_source': fields or DEFAULT_FIELDS
}
result, hits, output = execute_basic(TYPE_ENTITY, q)
output['facets'] = parse_facet_result(state, result)
sub_queries = []
for doc in hits.get('hits', []):
entity = doc.get('_source')
entity['id'] = doc.get('_id')
entity['score'] = doc.get('_score')
entity['api_url'] = url_for('entities_api.view', id=doc.get('_id'))
output['results'].append(entity)
sq = {'term': {'entities.id': entity['id']}}
sq = add_filter(sq, {
'terms': {'collection_id': state.authz.collections_read}
})
sq = {'size': 0, 'query': sq}
sub_queries.append(json.dumps({}))
sub_queries.append(json.dumps(sq))
if doc_counts and len(sub_queries):
# Get the number of matching documents for each entity.
body = '\n'.join(sub_queries)
res = es.msearch(index=es_index, doc_type=TYPE_DOCUMENT, body=body)
for (entity, resp) in zip(output['results'], res.get('responses')):
entity['doc_count'] = resp.get('hits', {}).get('total')
return output
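# Note on the msearch body built above (illustrative sketch only): the
# Elasticsearch multi-search API expects newline-delimited JSON in which every
# query body is preceded by a (possibly empty) header line, e.g.
#
#   {}
#   {"size": 0, "query": {...filter for entity 1...}}
#   {}
#   {"size": 0, "query": {...filter for entity 2...}}
#
# which is why an empty json.dumps({}) header is appended before each
# per-entity count query.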
def load_entity(entity_id):
"""Load a single entity by ID."""
result = es.get(index=es_index, doc_type=TYPE_ENTITY, id=entity_id,
ignore=[404])
if result.get('found') is False:
return
entity = result.get('_source')
entity.pop('text', None)
entity['id'] = result.get('_id')
return entity
def facet_collections(state, q, aggs):
filters = state.filters
filters['collection_id'] = state.authz.collections_read
aggs['scoped']['aggs']['collections'] = {
'filter': {
'query': filter_query(q, filters)
},
'aggs': {
'collections': {
'terms': {'field': 'collection_id', 'size': state.facet_size}
}
}
}
return aggs
def suggest_entities(prefix, authz, min_count=0, schemas=None, size=5):
"""Auto-complete API."""
options = []
if prefix is not None and len(prefix.strip()):
q = {
'match_phrase_prefix': {'name': prefix.strip()}
}
if min_count > 0:
q = add_filter(q, {'range': {'doc_count': {'gte': min_count}}})
if schemas is not None and len(schemas):
q = add_filter(q, {'terms': {'$schema': schemas}})
# TODO: is this correct? should we allow filter by dataset entities?
q = add_filter(q, {'terms': {'collection_id': authz.collections_read}})
q = {
'size': size,
'sort': [{'doc_count': 'desc'}, '_score'],
'query': q,
'_source': ['name', 'schema', 'fingerprints', 'doc_count']
}
ref = ascii_text(prefix)
result = es.search(index=es_index, doc_type=TYPE_ENTITY, body=q)
for res in result.get('hits', {}).get('hits', []):
ent = res.get('_source')
terms = [ascii_text(t) for t in ent.pop('fingerprints', [])]
ent['match'] = ref in terms
ent['score'] = res.get('_score')
ent['id'] = res.get('_id')
options.append(ent)
return {
'prefix': prefix,
'results': options
}
def similar_entities(entity, state):
"""Merge suggestions API."""
required = []
boosters = []
must = None
entity_ids = entity.get('ids') or [entity.get('id')]
# search for fingerprints
for fp in entity.get('fingerprints', []):
required.append(multi_match(fp, ['fingerprints'], 1))
if not state.getbool('strict', False):
# broaden search to similar names
for name in entity.get('names', []):
required.append(multi_match(name, ['names', 'text'], 1))
# make it mandatory to have either a fingerprint or name match
must = {"bool": {"should": required, "minimum_should_match": 1}}
# boost by "contributing criteria"
for field in ['dates', 'countries', 'addresses', 'schemata']:
for val in entity.get(field, []):
boosters.append(multi_match(val, [field]))
# filter types which cannot be resolved via fuzzy matching.
nonfuzzy = [s.name for s in schemata if not s.fuzzy]
state.raw_query = {
"bool": {
"should": boosters,
"must": must,
"must_not": [
{"ids": {"values": entity_ids}},
{"terms": {"schema": nonfuzzy}},
]
}
}
# pprint(state.raw_query)
return entities_query(state)
def get_dataset_countries(dataset_name):
"""Create a list of the top 300 countries mentioned in a dataset."""
q = {'term': {'dataset': dataset_name}}
aggs = {'countries': {'terms': {'field': 'countries', 'size': 300}}}
q = {'size': 0, 'query': q, 'aggregations': aggs}
result = es.search(index=es_index, doc_type=TYPE_ENTITY, body=q)
result = result.get('aggregations', {}).get('countries', {})
return [b.get('key') for b in result.get('buckets', [])]
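# Sketch of the aggregation response shape parsed above (illustrative values):
#
#   {"aggregations": {"countries": {"buckets": [
#       {"key": "de", "doc_count": 123},
#       {"key": "fr", "doc_count": 45}
#   ]}}}
#
# so get_dataset_countries() returns ["de", "fr", ...].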
|
|
#!/usr/bin/python
__author__ = 'vilelag'
import os
import argparse
import itertools
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import patheffects
def create_parsers():
#parser for the main program
parser = argparse.ArgumentParser(description='Create plots from logs generated by compute-accuracy or SE-Test1.py.'
' It is preferable to use make_logs.py to generate those logs. It'
' works with logs of a single draw or with averaged logs.')
parser.add_argument('-se', metavar='<int>', nargs='?', default=0, const=1, type=int,
help='Must be present to use logs generated using "SE-Test1.py" (1 : On, else: Off,'
' Default: Off)')
parser.add_argument('-f', '-folder', metavar='<folder>', default='./Logs/Log0',
help='Folder with logs generated by "run_dics.py"')
parser.add_argument('-gdl', metavar='<file>', default='./Logs/out.csv',
help='csv generated by "generate_dics.py"')
parser.add_argument('-o', '-out', metavar='<folder>', default='./Plots',
help="Folder where the plots will be generated")
parser.add_argument('-t', '-threads', metavar='<int>', nargs=1, default=[8], type=int,
help='Use <int> threads (default 8)')
parser.add_argument('-2Ds', metavar='[<int>]', default=0, const=1, type=int, nargs='?',
help="If present in the case of only one change, a plot for each class will be generated"
" (1=On, else = Off; Default=Off)")
parser.add_argument('-3D', metavar='[<int>]', default=0, const=1, type=int, nargs='?',
help="If present 3D plots will be produced (1=On, else = Off; Default=Off)")
parser.add_argument('-rot', metavar='[<int>]', default=0, const=1, type=int, nargs='?',
help="If present rotations of 3D plots will be produced (1=On, else = Off; Default=Off)")
parser.add_argument('-cm', '-ca_mean', metavar='<int>', nargs='?', default=0, const=1, type=int,
help='Must be present to use logs generated from averaging ca or se logs (1 : On, else: Off,'
' Default: Off)')
return parser
def create_output_folder(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def read_log(log):
with open(log) as f:
content = f.read().splitlines()
file_name = content[0]
data = dict()
for i in range(1, len(content)-1, 3):
kind = content[i][:-1]
top1 = float(content[i+1].split(" ")[2])
line3 = content[i+2].split(" ")
ta = float(line3[2])
sem = float(line3[8])
syn = float(line3[14])
data[kind] = [top1, ta, sem, syn]
# Adding a plot for the average!
flag = 0
mean = []
for key in data:
if flag == 0:
mean = list(data[key])
flag = 1
else:
for i in range(len(mean)):
mean[i] += data[key][i]
for i in range(len(mean)):
mean[i] /= len(data)
data['Average'] = mean
ll = content[-1].split(" ")
dic2 = {'seen': ll[4], 'total': ll[5]}
return data, dic2
def read_log_ca_mean(log):
with open(log) as f:
content = f.read().splitlines()
data = dict()
for i in range(1, len(content), 1):
_split = content[i].split(',')
kind = _split[0]
_data = [float(n) for n in _split[1:]]
data[kind] = _data
# Adding a plot for the average!
flag = 0
mean = []
for key in data:
if flag == 0:
mean = list(data[key])
flag = 1
else:
for i in range(len(mean)):
mean[i] += data[key][i]
for i in range(len(mean)):
mean[i] /= len(data)
data['Average'] = mean
return data
def read_se_log(log):
with open(log) as f:
content = f.read().splitlines()
data = dict()
for case in content[1:]:
tmp = case.split(',')
data[tmp[0]] = [float(tmp[1])]
return data
def read_csv(csv):
with open(csv) as f:
content = f.read().splitlines()
csv_data = dict()
for i in range(1, len(content)):
tmp = content[i].split(';')
name = os.path.splitext(os.path.basename(tmp[0]))[0]
size = int(tmp[4])
window = int(tmp[5])
sample = float(tmp[6])
hs = int(tmp[7])
negative = int(tmp[8])
min_count = int(tmp[10])
alpha = float(tmp[11])
cbow = int(tmp[15])
csv_data[name] = [size, window, sample, hs, negative, min_count, alpha, cbow]
return csv_data
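# Illustrative sketch (layout inferred from the column indices used above, not
# guaranteed): each data row of the generate_dics.py csv is semicolon-separated
# with the model path in column 0 and the word2vec hyper-parameters spread over
# columns 4-15; unused columns are shown as '-':
#
#   path/model_0.bin;-;-;-;200;5;0.001;0;5;-;5;0.025;-;-;-;1
#
# which read_csv() turns into
#
#   {'model_0': [200, 5, 0.001, 0, 5, 5, 0.025, 1]}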
def check_changes(llist):
flags = [0 for i in range(len(llist[0]))]
elem = [llist[0][i] for i in range(len(llist[0]))]
for w in llist:
for i in range(len(w)):
if elem[i] != w[i]:
flags[i] = 1
changes = []
for i in range(len(flags)):
if flags[i] == 1:
changes.append(i)
return changes
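# Worked example (added for clarity): check_changes() returns the indices of
# the hyper-parameters that vary across configurations, e.g.
#
#   check_changes([[200, 5, 0.001], [300, 5, 0.001], [200, 5, 0.01]])
#   # -> [0, 2]   (size and sample vary, window is constant)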
def get_possible_values(llist, pos):
changes = dict()
for row in llist:
changes[row[pos]] = 1
changes = sorted(changes.keys())
return changes
def get_sub_dic(dic, pos, val):
new_dic = dict()
for k in sorted(dic):
if dic[k][pos] == val:
new_dic[k] = dic[k]
return new_dic
def plot_gen(c, changes, csv_data, log_data, out, se):
t_out = out+'/'+names[c[0]]+'/'+names[c[1]]
create_output_folder(t_out)
t_changes = []
for i in changes:
if i != c[0] and i != c[1]:
t_changes.append(i)
all_values = [get_possible_values(csv_data.values(), el) for el in t_changes]
# itertools.product(*...) generalises the previous hard-coded if/elif chain
all_values_comb = list(itertools.product(*all_values))
#The next loop can be optimized
count = 0
for tp in all_values_comb:
tmp_csv = csv_data
for i in range(len(tp)):
tmp_csv = get_sub_dic(tmp_csv, t_changes[i], tp[i])
plot3d_gen([c[0], c[1]], tmp_csv, log_data, t_out, se, count=str(count)+'_')
count += 1
print c
def init_plot_gen(plot_gen_args):
return plot_gen(*plot_gen_args)
def plot_percent(var, pos=0):
return '{0:2.1f} %'.format(var)
def plot2d_gen(pos, csv_data, log_data, folder, se, flag=0):
X = []
Y = dict()
for k in sorted(csv_data):
X.append(csv_data[k][pos])
for t, data in log_data[k].items():
try:
Y[t] = np.append(Y[t], [data], axis=0)
except KeyError:
Y[t] = np.array([data])
x = np.array(X)
#Joint plot
plt.figure(figsize=(16, 9))
# plt.subplot(111)
num_plots = len(Y.keys())
markers = itertools.cycle(['o', "v", '^', '<', '>', 's', '*', 'x', 'd'])
colormap = plt.cm.get_cmap('rainbow')
plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 1, num_plots)])
for k in sorted(Y):
y = Y[k][:, 0]
plt.plot(x, y, label=k, marker=markers.next())
add_table(csv_data, [pos])
plt.xlabel(names[pos])
if se != 0:
plt.ylabel("Spearman's rho")
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos=0: "{0:.3f} ".format(x)))
else:
plt.ylabel('Accuracy TOP1')
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos=0: "{0:.1f} \%".format(x)))
plt.grid(True)
lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)
lgd.get_frame().set_alpha(0.4)
# plt.savefig(folder+'/'+names[pos], bbox_extra_artists=(lgd,), bbox_inches='tight', transparent=False)
plt.savefig(folder+'/'+names[pos], bbox_inches='tight', transparent=False)
plt.close()
#Separated Plots
if flag != 0:
num_plots = len(Y.keys())
markers = itertools.cycle(['o', "v", '^', '<', '>', 's', '*', 'x', 'd'])
colormap = plt.cm.get_cmap('rainbow')
colors = itertools.cycle([colormap(i) for i in np.linspace(0, 1, num_plots)])
for k in sorted(Y):
print k
y = Y[k][:, 0]
plt.figure(figsize=(16, 9))
plt.plot(x, y, label=k, marker=markers.next(), color=colors.next())
plt.xlabel(names[pos])
if se != 0:
plt.ylabel("Spearman's rho")
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos=0: "{0:.3f} ".format(x)))
else:
plt.ylabel('Accuracy TOP1')
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos=0: "{0:.1f} \%".format(x)))
plt.grid(True)
plt.title(k)
add_table(csv_data, [pos])
plt.savefig(folder+'/'+names[pos]+'_'+k+'.png', bbox_inches='tight', transparent=False)
plt.close()
def plot3d_gen(changes, csv_data, log_data, folder, se, rot=0, flag3D=0, count=''):
xdic = dict()
ydic = dict()
zdic = dict()
for k in sorted(csv_data):
t_x = csv_data[k][changes[0]]
t_y = csv_data[k][changes[1]]
try:
xdic[t_x].append(t_x)
except KeyError:
xdic[t_x] = [t_x]
try:
ydic[t_y].append(t_y)
except KeyError:
ydic[t_y] = [t_y]
for t, data in log_data[k].items():
try:
zdic[t][(t_x, t_y)] = data
except KeyError:
zdic[t] = {(t_x, t_y): data}
x = [xdic[i] for i in sorted(xdic)]
y = [ydic[i] for i in sorted(ydic)]
x = np.array(x).T
y = np.array(y)
z_ = dict()
z = dict()
for ty in zdic:
z_[ty] = []
for k in sorted(zdic[ty]):
z_[ty].append(zdic[ty][k][0])
z[ty] = np.array(z_[ty])
z[ty] = z[ty].reshape((x.shape[1], x.shape[0])).T
for k in sorted(z):
fig = plt.figure(figsize=(16, 9))
if flag3D == 1:
ax = fig.add_subplot(211, projection='3d')
ax.plot_surface(x, y, z[k], rstride=1, cstride=1, cmap=cm.get_cmap('bwr'))
if se != 0:
ax.set_zlabel("Spearman's rho")
plt.gca().zaxis.set_major_formatter(FuncFormatter(lambda x, pos=0: "{0:.3f} ".format(x)))
else:
ax.set_zlabel('Accuracy TOP1')
plt.gca().zaxis.set_major_formatter(FuncFormatter(plot_percent))
else:
ax = fig.add_subplot(111)
if se != 0:
cax = ax.imshow(z[k], cmap=cm.jet, interpolation='None', origin='lower',
extent=[x[0][0], x[0][-1], y[0][0], y[-1][0]], aspect='auto', vmin=-1, vmax=1)
bar = fig.colorbar(cax, format='%.3f')
bar.set_label("Spearman's rho")
else:
cax = ax.imshow(z[k], cmap=cm.jet, interpolation='None', origin='lower',
extent=[x[0][0], x[0][-1], y[0][0], y[-1][0]], aspect='auto', vmin=0, vmax=100)
bar = fig.colorbar(cax, format='%.1f%%')
bar.set_label('Accuracy TOP1')
a1 = len(z[k])
a2 = len(z[k][0])
for i in xrange(a1):
for j in xrange(a2):
ax.text(float(j+0.5)/a2, float(i+0.5)/a1, '{0:.3}'.format(z[k][i][j]),
size='medium', ha='center', va='center', transform=ax.transAxes,
path_effects=[patheffects.withSimplePatchShadow(shadow_rgbFace=(1, 1, 1))])
plt.title(k)
plt.xlabel(names[changes[0]])
plt.ylabel(names[changes[1]])
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos=0: "{0:.1E}".format(x)))
plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda x, pos=0: "{0:.1E}".format(x)))
add_table(csv_data, changes)
if rot == 1 and flag3D == 1 and count == '':
create_output_folder('{0}/{1}'.format(folder, k))
for ii in xrange(0, 360, 20):
ax.view_init(elev=30., azim=ii)
plt.savefig(folder+'/'+k+'/{}'.format(ii)+names[changes[0]]+'-'+names[changes[1]]+'_'+k+'.png',
bbox_inches='tight', transparent=False)
else:
plt.savefig(folder+'/'+count+names[changes[0]]+'-'+names[changes[1]]+'_'+k, bbox_inches='tight',
transparent=False)
plt.close()
def add_table(csv_data, changes):
plt.rc('text', usetex=True)
table = '\\begin{tabular}{|'+''.join(['c |'*len(names)]) + '} \\hline '
table += ' & '.join(['\\textbf{{{}}}'.format(n) for n in names]) + '\\\\ \hline '
vals = csv_data.itervalues().next()
vals = [str(i) for i in vals]
for i in changes:
vals[i] = '--'
table += ' & '.join(vals) + '\\\\ \hline '
table += '\\end{tabular}'
plt.figtext(0.3, -0.02, table)
def main():
parser = create_parsers()
args = vars(parser.parse_args())
se = args['se']
gdl = args['gdl']
out = args['o'].rstrip('/')
flag3D = args['3D']
flag2D = args['2Ds']
rot = args['rot']
folder = args['f'].rstrip('/')
threads = args['t'][0]
cm = args['cm']
create_output_folder(out)
csv_data = read_csv(gdl)
log_data = dict()
log_list = csv_data.keys()
for log in log_list:
try:
if se == 0:
if cm == 0:
log_data[log], trash = read_log(folder+'/'+log+'.log')
else:
log_data[log] = read_log_ca_mean(folder+'/'+log+'.log')
else:
n = int(log.split('_')[-1].rstrip('.bin'))
log_data[log] = read_se_log(folder+'/SE-2012/{}/log.csv'.format(n))
except:
print "Couldn't find {0}.log\n".format(log)
log_data.pop(log, None)
csv_data.pop(log, None)
changes = check_changes(csv_data.values())
if len(changes) == 1:
plot2d_gen(changes[0], csv_data, log_data, out, se, flag=flag2D)
elif len(changes) == 2:
plot3d_gen(changes, csv_data, log_data, out, se, rot=rot, flag3D=flag3D)
else:
combinations = [i for i in itertools.combinations(changes, 2)]
pool = Pool(threads)
pool.map(init_plot_gen, itertools.izip(combinations, itertools.repeat(changes), itertools.repeat(csv_data),
itertools.repeat(log_data), itertools.repeat(out), itertools.repeat(se)))
names = ['size', 'window', 'sample', 'hs', 'negative', 'min-count', 'alpha', 'cbow']
main()
|
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
## Incomplete!
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Float,
Integer,
Set,
String,
Bool,
)
from openpyxl.descriptors.excel import Guid, ExtensionList
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.xml.functions import tostring
from openpyxl.cell.text import Text
#from openpyxl.worksheet.ole import ObjectAnchor
from .author import AuthorList
from .comments import Comment
from .shape_writer import ShapeWriter
class Properties(Serialisable):
locked = Bool(allow_none=True)
defaultSize = Bool(allow_none=True)
_print = Bool(allow_none=True)
disabled = Bool(allow_none=True)
uiObject = Bool(allow_none=True)
autoFill = Bool(allow_none=True)
autoLine = Bool(allow_none=True)
altText = String(allow_none=True)
textHAlign = Set(values=(['left', 'center', 'right', 'justify', 'distributed']))
textVAlign = Set(values=(['top', 'center', 'bottom', 'justify', 'distributed']))
lockText = Bool(allow_none=True)
justLastX = Bool(allow_none=True)
autoScale = Bool(allow_none=True)
rowHidden = Bool(allow_none=True)
colHidden = Bool(allow_none=True)
#anchor = Typed(expected_type=ObjectAnchor, )
__elements__ = ('anchor',)
def __init__(self,
locked=None,
defaultSize=None,
_print=None,
disabled=None,
uiObject=None,
autoFill=None,
autoLine=None,
altText=None,
textHAlign=None,
textVAlign=None,
lockText=None,
justLastX=None,
autoScale=None,
rowHidden=None,
colHidden=None,
anchor=None,
):
self.locked = locked
self.defaultSize = defaultSize
self._print = _print
self.disabled = disabled
self.uiObject = uiObject
self.autoFill = autoFill
self.autoLine = autoLine
self.altText = altText
self.textHAlign = textHAlign
self.textVAlign = textVAlign
self.lockText = lockText
self.justLastX = justLastX
self.autoScale = autoScale
self.rowHidden = rowHidden
self.colHidden = colHidden
self.anchor = anchor
class CommentRecord(Serialisable):
tagname = "comment"
ref = String()
authorId = Integer()
guid = Guid(allow_none=True)
shapeId = Integer(allow_none=True)
text = Typed(expected_type=Text)
commentPr = Typed(expected_type=Properties, allow_none=True)
author = String(allow_none=True)
__elements__ = ('text', 'commentPr')
__attrs__ = ('ref', 'authorId', 'guid', 'shapeId')
def __init__(self,
ref="",
authorId=0,
guid=None,
shapeId=0,
text=None,
commentPr=None,
author=None,
height=79,
width=144
):
self.ref = ref
self.authorId = authorId
self.guid = guid
self.shapeId = shapeId
if text is None:
text = Text()
self.text = text
self.commentPr = commentPr
self.author = author
self.height = height
self.width = width
@classmethod
def from_cell(cls, cell):
"""
Class method to convert cell comment
"""
comment = cell._comment
ref = cell.coordinate
self = cls(ref=ref, author=comment.author)
self.text.t = comment.content
self.height = comment.height
self.width = comment.width
return self
@property
def content(self):
"""
Return the comment text with all inline formatting removed
"""
return self.text.content
class CommentSheet(Serialisable):
tagname = "comments"
authors = Typed(expected_type=AuthorList)
commentList = NestedSequence(expected_type=CommentRecord, count=0)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
_id = None
_path = "/xl/comments/comment{0}.xml"
mime_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml"
_rel_type = "comments"
_rel_id = None
__elements__ = ('authors', 'commentList')
def __init__(self,
authors=None,
commentList=None,
extLst=None,
):
self.authors = authors
self.commentList = commentList
def to_tree(self):
tree = super(CommentSheet, self).to_tree()
tree.set("xmlns", SHEET_MAIN_NS)
return tree
@property
def comments(self):
"""
Return a dictionary of comments keyed by coord
"""
authors = self.authors.author
for c in self.commentList:
yield c.ref, Comment(c.content, authors[c.authorId], c.height, c.width)
@classmethod
def from_comments(cls, comments):
"""
Create a comment sheet from a list of comments for a particular worksheet
"""
authors = IndexedList()
# dedupe authors and get indexes
for comment in comments:
comment.authorId = authors.add(comment.author)
return cls(authors=AuthorList(authors), commentList=comments)
def write_shapes(self, vml=None):
"""
Create the VML for comments
"""
sw = ShapeWriter(self.comments)
return sw.write(vml)
@property
def path(self):
"""
Return path within the archive
"""
return self._path.format(self._id)
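# Hedged usage sketch (assumption, mirrors the API defined above): building a
# comment sheet for a worksheet from CommentRecord objects and serialising it.
# `cells_with_comments` is a hypothetical iterable of cells carrying comments.
#
#   records = [CommentRecord.from_cell(cell) for cell in cells_with_comments]
#   sheet = CommentSheet.from_comments(records)
#   sheet._id = 1                      # -> path "/xl/comments/comment1.xml"
#   xml = tostring(sheet.to_tree())    # tostring is imported at the top
#   vml = sheet.write_shapes()         # VML needed to display the comments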
|
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - LikePages action
This action generates a list of pages that either start or end
with the same word as the current pagename. If only one matching
page is found, that page is displayed directly.
@copyright: 2001 Richard Jones <[email protected]>,
2001 Juergen Hermann <[email protected]>
@license: GNU GPL, see COPYING for details.
"""
import re
from MoinMoin import config, wikiutil
from MoinMoin.support import difflib
from MoinMoin.Page import Page
def execute(pagename, request):
_ = request.getText
start, end, matches = findMatches(pagename, request)
# Error?
if isinstance(matches, (str, unicode)):
request.theme.add_msg(wikiutil.escape(matches), "info")
Page(request, pagename).send_page()
return
# No matches
if not matches:
request.theme.add_msg(_('No pages like "%s"!') % (wikiutil.escape(pagename), ), "error")
Page(request, pagename).send_page()
return
# One match - display it
if len(matches) == 1:
request.theme.add_msg(_('Exactly one page like "%s" found, redirecting to page.') % (wikiutil.escape(pagename), ), "info")
Page(request, matches.keys()[0]).send_page()
return
# more than one match, list 'em
# This action generates data using the user language
request.setContentLanguage(request.lang)
request.theme.send_title(_('Pages like "%s"') % (pagename), pagename=pagename)
# Start content - IMPORTANT - without content div, there is no
# direction support!
request.write(request.formatter.startContent("content"))
showMatches(pagename, request, start, end, matches)
# End content and send footer
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
def findMatches(pagename, request, s_re=None, e_re=None):
""" Find like pages
@param pagename: name to match
@param request: current request
@param s_re: start re for wiki matching
@param e_re: end re for wiki matching
@rtype: tuple
@return: start word, end word, matches dict
"""
# Get full list of pages, with no filtering - very fast. We will
# first search for like pages, then filter the results.
pages = request.rootpage.getPageList(user='', exists='')
# Remove current page
try:
pages.remove(pagename)
except ValueError:
pass
# Get matches using wiki way, start and end of word
start, end, matches = wikiMatches(pagename, pages, start_re=s_re,
end_re=e_re)
# Get the best 10 close matches
close_matches = {}
found = 0
for name in closeMatches(pagename, pages):
# Skip names already in matches
if name in matches:
continue
# Filter deleted pages or pages the user can't read
page = Page(request, name)
if page.exists() and request.user.may.read(name):
close_matches[name] = 8
found += 1
# Stop after 10 matches
if found == 10:
break
# Filter deleted pages or pages the user can't read from
# matches. Order is important!
for name in matches.keys(): # we need .keys() because we modify the dict
page = Page(request, name)
if not (page.exists() and request.user.may.read(name)):
del matches[name]
# Finally, merge both dicts
matches.update(close_matches)
return start, end, matches
def wikiMatches(pagename, pages, start_re=None, end_re=None):
"""
Get pages that start or end with the same word as this page
Matches are ranked like this:
4 - page is subpage of pagename
3 - match both start and end
2 - match end
1 - match start
@param pagename: page name to match
@param pages: list of page names
@param start_re: start word re (compiled regex)
@param end_re: end word re (compiled regex)
@rtype: tuple
@return: start, end, matches dict
"""
if start_re is None:
start_re = re.compile('([%s][%s]+)' % (config.chars_upper,
config.chars_lower))
if end_re is None:
end_re = re.compile('([%s][%s]+)$' % (config.chars_upper,
config.chars_lower))
# If we don't get results with wiki words matching, fall back to
# simple first word and last word, using spaces.
words = pagename.split()
match = start_re.match(pagename)
if match:
start = match.group(1)
else:
start = words[0]
match = end_re.search(pagename)
if match:
end = match.group(1)
else:
end = words[-1]
matches = {}
subpage = pagename + '/'
# Find any matching pages and rank by type of match
for name in pages:
if name.startswith(subpage):
matches[name] = 4
else:
if name.startswith(start):
matches[name] = 1
if name.endswith(end):
matches[name] = matches.get(name, 0) + 2
return start, end, matches
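# Illustrative example (not part of the original action): for pagename
# "WikiSandBox" the start word is "Wiki" and the end word is "Box", so
#   "WikiSandBox/SubPage" -> rank 4 (subpage)
#   "WikiNameBox"         -> rank 3 (start + end match)
#   "ToolBox"             -> rank 2 (end match only)
#   "WikiCourse"          -> rank 1 (start match only)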
def closeMatches(pagename, pages):
""" Get close matches.
Return all matching pages with rank above cutoff value.
@param pagename: page name to match
@param pages: list of page names
@rtype: list
@return: list of matching pages, sorted by rank
"""
# Match using case insensitive matching
# Make a mapping from lowercased names to pages - pages might have the
# same name with different case (although it's unusual).
lower = {}
for name in pages:
key = name.lower()
if key in lower:
lower[key].append(name)
else:
lower[key] = [name]
# Get all close matches
all_matches = difflib.get_close_matches(pagename.lower(), lower.keys(),
len(lower), cutoff=0.6)
# Replace lower names with original names
matches = []
for name in all_matches:
matches.extend(lower[name])
return matches
def showMatches(pagename, request, start, end, matches, show_count=True):
keys = matches.keys()
keys.sort()
_showMatchGroup(request, matches, keys, 8, pagename, show_count)
_showMatchGroup(request, matches, keys, 4, "%s/..." % pagename, show_count)
_showMatchGroup(request, matches, keys, 3, "%s...%s" % (start, end), show_count)
_showMatchGroup(request, matches, keys, 1, "%s..." % (start, ), show_count)
_showMatchGroup(request, matches, keys, 2, "...%s" % (end, ), show_count)
def _showMatchGroup(request, matches, keys, match, title, show_count=True):
_ = request.getText
matchcount = matches.values().count(match)
if matchcount:
if show_count:
# Render title line
request.write(request.formatter.paragraph(1))
request.write(request.formatter.strong(1))
request.write(request.formatter.text(
_('%(matchcount)d %(matches)s for "%(title)s"') % {
'matchcount': matchcount,
'matches': ' ' + (_('match'), _('matches'))[matchcount != 1],
'title': title}))
request.write(request.formatter.strong(0))
request.write(request.formatter.paragraph(0))
# Render links
request.write(request.formatter.bullet_list(1))
for key in keys:
if matches[key] == match:
request.write(request.formatter.listitem(1))
request.write(request.formatter.pagelink(1, key, generated=True))
request.write(request.formatter.text(key))
request.write(request.formatter.pagelink(0, key, generated=True))
request.write(request.formatter.listitem(0))
request.write(request.formatter.bullet_list(0))
|
|
"""Xdawn implementation."""
# Authors: Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from scipy import linalg
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from .. import Covariance, EvokedArray, Evoked, EpochsArray
from ..io.pick import pick_types
from .ica import _get_fast_dot
from ..utils import logger
from ..decoding.mixin import TransformerMixin
from ..cov import _regularized_covariance
from ..channels.channels import ContainsMixin
def _least_square_evoked(data, events, event_id, tmin, tmax, sfreq):
"""Least square estimation of evoked response from data.
Parameters
----------
data : ndarray, shape (n_channels, n_times)
The data from which to estimate the evoked responses.
events : ndarray, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be ignored.
event_id : dict
The id of the events to consider
tmin : float
Start time before event.
tmax : float
End time after event.
sfreq : float
Sampling frequency.
Returns
-------
evokeds_data : dict of ndarray
A dict of evoked data for each event type in event_id.
toeplitz : dict of ndarray
A dict of toeplitz matrix for each event type in event_id.
"""
nmin = int(tmin * sfreq)
nmax = int(tmax * sfreq)
window = nmax - nmin
n_samples = data.shape[1]
toeplitz_mat = dict()
full_toep = list()
for eid in event_id:
# select events by type
ix_ev = events[:, -1] == event_id[eid]
# build toeplitz matrix
trig = np.zeros((n_samples, 1))
ix_trig = (events[ix_ev, 0]) + nmin
trig[ix_trig] = 1
toep_mat = linalg.toeplitz(trig[0:window], trig)
toeplitz_mat[eid] = toep_mat
full_toep.append(toep_mat)
# Concatenate toeplitz
full_toep = np.concatenate(full_toep)
# least square estimation
predictor = np.dot(linalg.pinv(np.dot(full_toep, full_toep.T)), full_toep)
all_evokeds = np.dot(predictor, data.T)
all_evokeds = np.vsplit(all_evokeds, len(event_id))
# parse evoked response
evoked_data = dict()
for idx, eid in enumerate(event_id):
evoked_data[eid] = all_evokeds[idx].T
return evoked_data, toeplitz_mat
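# Note (added for clarity, not in the original): the code above solves the
# linear model  X ~ A . D  in the least-squares sense, where X is the
# (n_channels, n_samples) data, D stacks one Toeplitz "trigger" matrix per
# event type and A holds the unknown evoked responses, i.e.
#   A_hat = X . D.T . (D . D.T)^-1
# which is what `predictor` and `all_evokeds` compute (transposed).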
def _check_overlapp(epochs):
"""check if events are overlapped."""
isi = np.diff(epochs.events[:, 0])
window = int((epochs.tmax - epochs.tmin) * epochs.info['sfreq'])
# Events are overlapped if the minimal inter-stimulus interval is smaller
# than the time window.
return isi.min() < window
def _construct_signal_from_epochs(epochs):
"""Reconstruct pseudo continuous signal from epochs."""
start = (np.min(epochs.events[:, 0]) +
int(epochs.tmin * epochs.info['sfreq']))
stop = (np.max(epochs.events[:, 0]) +
int(epochs.tmax * epochs.info['sfreq']) + 1)
n_samples = stop - start
epochs_data = epochs.get_data()
n_epochs, n_channels, n_times = epochs_data.shape
events_pos = epochs.events[:, 0] - epochs.events[0, 0]
data = np.zeros((n_channels, n_samples))
for idx in range(n_epochs):
onset = events_pos[idx]
offset = onset + n_times
data[:, onset:offset] = epochs_data[idx]
return data
def least_square_evoked(epochs, return_toeplitz=False):
"""Least square estimation of evoked response from a Epochs instance.
Parameters
----------
epochs : Epochs instance
An instance of Epochs.
return_toeplitz : bool (default False)
If true, compute the toeplitz matrix.
Returns
-------
evokeds : dict of evoked instance
A dict of evoked instances, one for each event type in epochs.event_id.
toeplitz : dict of ndarray
If return_toeplitz is true, return the toeplitz matrix for each event
type in epochs.event_id.
"""
if not isinstance(epochs, _BaseEpochs):
raise ValueError('epochs must be an instance of `mne.Epochs`')
events = epochs.events.copy()
events[:, 0] -= events[0, 0] + int(epochs.tmin * epochs.info['sfreq'])
data = _construct_signal_from_epochs(epochs)
evoked_data, toeplitz = _least_square_evoked(data, events, epochs.event_id,
tmin=epochs.tmin,
tmax=epochs.tmax,
sfreq=epochs.info['sfreq'])
evokeds = dict()
info = cp.deepcopy(epochs.info)
for name, data in evoked_data.items():
n_events = len(events[events[:, 2] == epochs.event_id[name]])
evoked = EvokedArray(data, info, tmin=epochs.tmin,
comment=name, nave=n_events)
evokeds[name] = evoked
if return_toeplitz:
return evokeds, toeplitz
return evokeds
class Xdawn(TransformerMixin, ContainsMixin):
"""Implementation of the Xdawn Algorithm.
Xdawn is a spatial filtering method designed to improve the signal
to signal + noise ratio (SSNR) of the ERP responses. Xdawn was originally
designed for P300 evoked potential by enhancing the target response with
respect to the non-target response. This implementation is a generalization
to any type of ERP.
Parameters
----------
n_components : int (default 2)
The number of components to decompose M/EEG signals.
signal_cov : None | Covariance | ndarray, shape (n_channels, n_channels)
(default None). The signal covariance used for whitening of the data.
if None, the covariance is estimated from the epochs signal.
correct_overlap : 'auto' or bool (default 'auto')
Apply correction for overlapping ERPs when estimating the evoked
responses. If 'auto', the overlap correction is chosen based on
the events in epochs.events.
reg : float | str | None (default None)
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
Attributes
----------
filters_ : dict of ndarray
If fit, the Xdawn components used to decompose the data for each event
type, else empty.
patterns_ : dict of ndarray
If fit, the Xdawn patterns used to restore M/EEG signals for each event
type, else empty.
evokeds_ : dict of evoked instance
If fit, the evoked response for each event type.
Notes
-----
.. versionadded:: 0.10
See Also
--------
ICA
CSP
References
----------
[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to brain-computer
interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
August). Theoretical analysis of xDAWN algorithm: application to an
efficient sensor selection in a P300 BCI. In Signal Processing Conference,
2011 19th European (pp. 1382-1386). IEEE.
"""
def __init__(self, n_components=2, signal_cov=None, correct_overlap='auto',
reg=None):
"""init xdawn."""
self.n_components = n_components
self.signal_cov = signal_cov
self.reg = reg
self.filters_ = dict()
self.patterns_ = dict()
self.evokeds_ = dict()
if correct_overlap not in ['auto', True, False]:
raise ValueError('correct_overlap must be a bool or "auto"')
self.correct_overlap = correct_overlap
def fit(self, epochs, y=None):
"""Fit Xdawn from epochs.
Parameters
----------
epochs : Epochs object
An instance of Epoch on which Xdawn filters will be trained.
y : ndarray | None (default None)
Not used, here for compatibility with decoding API.
Returns
-------
self : Xdawn instance
The Xdawn instance.
"""
if self.correct_overlap == 'auto':
self.correct_overlap = _check_overlapp(epochs)
# Extract signal covariance
if self.signal_cov is None:
if self.correct_overlap:
sig_data = _construct_signal_from_epochs(epochs)
else:
sig_data = np.hstack(epochs.get_data())
self.signal_cov_ = _regularized_covariance(sig_data, self.reg)
elif isinstance(self.signal_cov, Covariance):
self.signal_cov_ = self.signal_cov.data
elif isinstance(self.signal_cov, np.ndarray):
self.signal_cov_ = self.signal_cov
else:
raise ValueError('signal_cov must be None, a covariance instance '
'or a ndarray')
# estimates evoked covariance
self.evokeds_cov_ = dict()
if self.correct_overlap:
if epochs.baseline is not None:
raise ValueError('Baseline correction must be None if overlap '
'correction activated')
evokeds, toeplitz = least_square_evoked(epochs,
return_toeplitz=True)
else:
evokeds = dict()
toeplitz = dict()
for eid in epochs.event_id:
evokeds[eid] = epochs[eid].average()
toeplitz[eid] = 1.0
self.evokeds_ = evokeds
for eid in epochs.event_id:
data = np.dot(evokeds[eid].data, toeplitz[eid])
self.evokeds_cov_[eid] = _regularized_covariance(data, self.reg)
# estimates spatial filters
for eid in epochs.event_id:
if self.signal_cov_.shape != self.evokeds_cov_[eid].shape:
raise ValueError('Size of signal cov must be the same as the'
' number of channels in epochs')
evals, evecs = linalg.eigh(self.evokeds_cov_[eid],
self.signal_cov_)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
evecs /= np.sqrt(np.sum(evecs ** 2, axis=0))
self.filters_[eid] = evecs
self.patterns_[eid] = linalg.inv(evecs.T)
# store some values
self.ch_names = epochs.ch_names
self.exclude = list(range(self.n_components, len(self.ch_names)))
self.event_id = epochs.event_id
return self
def transform(self, epochs):
"""Apply Xdawn dim reduction.
Parameters
----------
epochs : Epochs | ndarray, shape (n_epochs, n_channels, n_times)
Data on which Xdawn filters will be applied.
Returns
-------
X : ndarray, shape (n_epochs, n_components * event_types, n_times)
Spatially filtered signals.
"""
if isinstance(epochs, _BaseEpochs):
data = epochs.get_data()
elif isinstance(epochs, np.ndarray):
data = epochs
else:
raise ValueError('Data input must be of Epoch '
'type or numpy array')
# create full matrix of spatial filter
full_filters = list()
for filt in self.filters_.values():
full_filters.append(filt[:, 0:self.n_components])
full_filters = np.concatenate(full_filters, axis=1)
# Apply spatial filters
X = np.dot(full_filters.T, data)
X = X.transpose((1, 0, 2))
return X
def apply(self, inst, event_id=None, include=None, exclude=None):
"""Remove selected components from the signal.
Given the unmixing matrix, transform data,
zero out components, and inverse transform the data.
        This procedure will reconstruct M/EEG signals from which
        the dynamics described by the excluded components are subtracted.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
The data to be processed.
event_id : dict | list of str | None (default None)
            The kind of event to apply. If None, a dict of instances is
            returned, one for each event type Xdawn has been fitted on.
include : array_like of int | None (default None)
            The indices referring to columns in the unmixing matrix. The
components to be kept. If None, the first n_components (as defined
in the Xdawn constructor) will be kept.
exclude : array_like of int | None (default None)
            The indices referring to columns in the unmixing matrix. The
            components to be zeroed out. If None, all the components except the
            first n_components will be excluded.
Returns
-------
        out : dict of instances
            A dict of instances (of the same type as the inst input), one for
            each event type in event_id.
"""
if event_id is None:
event_id = self.event_id
if isinstance(inst, _BaseRaw):
out = self._apply_raw(raw=inst, include=include, exclude=exclude,
event_id=event_id)
elif isinstance(inst, _BaseEpochs):
out = self._apply_epochs(epochs=inst, include=include,
exclude=exclude, event_id=event_id)
elif isinstance(inst, Evoked):
out = self._apply_evoked(evoked=inst, include=include,
exclude=exclude, event_id=event_id)
else:
raise ValueError('Data input must be Raw, Epochs or Evoked type')
return out
def _apply_raw(self, raw, include, exclude, event_id):
"""Aux method."""
if not raw.preload:
raise ValueError('Raw data must be preloaded to apply Xdawn')
picks = pick_types(raw.info, meg=False, include=self.ch_names,
exclude='bads')
raws = dict()
for eid in event_id:
data = raw[picks, :][0]
data = self._pick_sources(data, include, exclude, eid)
raw_r = raw.copy()
raw_r[picks, :] = data
raws[eid] = raw_r
return raws
def _apply_epochs(self, epochs, include, exclude, event_id):
"""Aux method."""
if not epochs.preload:
raise ValueError('Epochs must be preloaded to apply Xdawn')
picks = pick_types(epochs.info, meg=False, ref_meg=False,
include=self.ch_names, exclude='bads')
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'xdawn.ch_names' % (len(self.ch_names),
len(picks)))
epochs_dict = dict()
data = np.hstack(epochs.get_data()[:, picks])
for eid in event_id:
data_r = self._pick_sources(data, include, exclude, eid)
data_r = np.array(np.split(data_r, len(epochs.events), 1))
info_r = cp.deepcopy(epochs.info)
epochs_r = EpochsArray(data=data_r, info=info_r,
events=epochs.events, tmin=epochs.tmin,
event_id=epochs.event_id, verbose=False)
epochs_r.preload = True
epochs_dict[eid] = epochs_r
return epochs_dict
def _apply_evoked(self, evoked, include, exclude, event_id):
"""Aux method."""
picks = pick_types(evoked.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where evoked come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked does not match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide an Evoked object that\'s compatible '
'with xdawn.ch_names' % (len(self.ch_names),
len(picks)))
data = evoked.data[picks]
evokeds = dict()
for eid in event_id:
data_r = self._pick_sources(data, include, exclude, eid)
evokeds[eid] = evoked.copy()
# restore evoked
evokeds[eid].data[picks] = data_r
return evokeds
def _pick_sources(self, data, include, exclude, eid):
"""Aux method."""
fast_dot = _get_fast_dot()
if exclude is None:
exclude = self.exclude
else:
exclude = list(set(list(self.exclude) + list(exclude)))
logger.info('Transforming to Xdawn space')
# Apply unmixing
sources = fast_dot(self.filters_[eid].T, data)
if include not in (None, []):
mask = np.ones(len(sources), dtype=np.bool)
mask[np.unique(include)] = False
sources[mask] = 0.
logger.info('Zeroing out %i Xdawn components' % mask.sum())
elif exclude not in (None, []):
exclude_ = np.unique(exclude)
sources[exclude_] = 0.
logger.info('Zeroing out %i Xdawn components' % len(exclude_))
logger.info('Inverse transforming to sensor space')
data = fast_dot(self.patterns_[eid], sources)
return data
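# A minimal usage sketch for the Xdawn class above (assuming `epochs` is an
# existing, preloaded mne.Epochs instance; the variable names are illustrative):
#
#     xd = Xdawn(n_components=2)
#     xd.fit(epochs)              # estimate spatial filters per event type
#     X = xd.transform(epochs)    # (n_epochs, n_components * n_event_types, n_times)
#     cleaned = xd.apply(epochs)  # dict of reconstructed instances, one per event id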
|
|
# Copyright (c) 2005-2009 Jaroslav Gresula
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
from pygccxml import parser
from pygccxml import declarations
import StringIO
import re
def global_namespace():
#configure GCC-XML parser
config = parser.config_t( gccxml_path= "c:/Progra~1/GCC_XML/bin/gccxml.exe",\
include_paths= ["e:/starteam/docplatform/nextrelease/code/common"] )
#parsing source file
decls = parser.parse( ['interf.h'], config )
return declarations.get_global_namespace( decls )
def apply_type_metainfo_visitor( mi, visitor ):
name = 'on_' + mi['category']
if hasattr( visitor, name ):
return getattr( visitor, name )( mi )
else:
return getattr( visitor, 'on_default_type' )( mi )
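# Dispatch sketch for apply_type_metainfo_visitor (the metainfo dicts below are
# illustrative, not taken from a real parse):
#
#     mi = {'category': 'const_pointer', 'base_name': 'jag::Char::'}
#     apply_type_metainfo_visitor(mi, visitor)   # -> visitor.on_const_pointer(mi)
#     mi = {'category': 'enum', 'base_name': 'jag::ColorSpace::'}
#     apply_type_metainfo_visitor(mi, visitor)   # -> visitor.on_default_type(mi)
#                                                #    (no on_enum handler defined)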
class _simple_sig_handler:
def __init__( self, o ):
self.o = o
def write( self, str_ ):
self.o.write( str_ )
def on_name( self, name ):
self.o.write( name )
def on_link_start( self, lnk ):
pass
def on_link_end( self ):
pass
class type_visitor:
def __init__( self, lang, opts ):
self.type_map = opts[lang+'_type_map']
self.opts = opts
def _map_type( self, mi ):
t = mi['base_name'].rstrip('::')
return self.type_map[t]
def on_const_pointer( self, mi ):
return self._map_type(mi) + ' const*'
def on_default_type( self, mi ):
return self._map_type(mi)
class java_type_visitor:
def __init__(self, opts):
self.type_map = opts['java_type_map']
self.opts = opts
def _map_type( self, mi ):
t = mi['base_name'].rstrip('::')
return self.type_map[t]
def on_const_pointer( self, mi ):
t = mi['base_name'].rstrip('::')
if t == 'jag::Char':
return 'String'
else:
return self._map_type(mi) + "``\[\]``"
def on_default_type( self, mi ):
return self._map_type(mi)
###########################################################################
class sig_base:
def _output_linked_entity( self, mi, h, action, *args ):
if self.is_doc:
id_ = mi['gcc'].id()
if id_:
h.on_link_start( id_ )
action( *args )
        if self.is_doc and id_:
            h.on_link_end()
def _on_start_output(self, callable_, handler ):
return True
def get( self, m, handler_type=_simple_sig_handler ):
sig = StringIO.StringIO()
h = handler_type( sig )
if not self._on_start_output( m, h ):
return sig.getvalue()
gcc = m.gcc
freefun = isinstance( gcc, declarations.calldef.free_function_t )
num_args = len( m.metainfo()[1:] )
mi = m.metainfo()
self._output_linked_entity( mi.return_type(), h, self._on_return_type, mi, h )
self._on_callable_name( freefun, m, h )
self._on_arglist_start( h )
if not freefun:
self._on_before_first_method_argument( m, h, num_args )
self._on_arguments( mi[1:], h, num_args )
self._on_arglist_end( h )
if not freefun and gcc.has_const:
self._on_const(h)
self._on_sig_end(h)
return sig.getvalue()
def _on_arglist_start( self, h ):
h.write( '(' )
def _on_arglist_end( self, h ):
h.write( ')' )
def _on_callable_name( self, is_freefun, m, h ):
h.on_name( m.name() )
def _on_sig_end( self, h):
pass
def _on_before_first_method_argument( self, m, h, num_args ):
pass
def _on_const( self, h ):
pass
class c_cpp_base( sig_base ):
def __init__( self, type_, opts, is_doc, default_args ):
self.opts = opts
self.tv = type_visitor( type_, opts )
self.is_doc = is_doc
self.default_args = default_args
def map_type( self, mi ):
return apply_type_metainfo_visitor( mi, self.tv )
def _output_type( self, mi, h, write_space=True ):
self._output_linked_entity( mi, h, h.write, self.map_type(mi) )
if write_space:
h.write( ' ' )
def _on_arguments( self, mis, h, num_args ):
for i, mi in enumerate( mis ):
self._output_type( mi, h )
h.write( mi['argname'] )
if self.default_args and mi['default_value']:
if mi['category'] == 'const_pointer':
assert mi['default_value'] == '0'
h.write( '=0' )
elif mi['category'] == 'interface':
#print ">> %s" % mi['default_value']
assert mi['default_value'].endswith( '()' ) or mi['default_value'].endswith( '( )' )
self._on_default_argument_interface( mi, h )
elif mi['category'] == 'enum':
h.write('='+mi['default_value'].split('::')[-1])
else:
assert not "unknown default argument type"
if i<num_args-1:
h.write( ', ' )
def _on_sig_end( self, h):
pass
###########################################################################
class cpp_signature( c_cpp_base ):
def __init__( self, opts, is_doc=False ):
# do support default arguments
c_cpp_base.__init__( self, 'cpp', opts, is_doc, True )
def _on_const( self, h ):
h.write( ' const' )
def _on_default_argument_interface( self, mi, h ):
assert mi['category'] == 'interface'
h.write( '=' )
self._output_type( mi, h, False )
h.write( '()' )
def _on_return_type( self, cm, h ):
cat = cm.return_type_category()
if cat == 'void':
key = self.is_doc and 'cpp_void_return' or 'cpp_void_return_noexc'
h.write( self.opts[key] )
elif cat in [ 'interface', 'other' ]:
h.write( self.map_type( cm.return_type() ) )
else:
assert not "shouldn't get here"
h.write( ' ' )
def _on_callable_name( self, is_freefun, m, h ):
orig_name = m.gcc.demangled.split( '(' )[0]
try:
name = m.opts['rename'][orig_name]
except KeyError:
name = m.name()
h.on_name( name )
#
#
#
class java_signature( c_cpp_base ):
def __init__( self, opts, is_doc=False ):
# do support default arguments
c_cpp_base.__init__( self, 'java', opts, is_doc, True )
self.tv = java_type_visitor(opts)
# ugly, it should come from the configuration
self.rename_d = {'jag::IDocument::finalize': 'finalize_doc'}
self.rename_d.update(self.opts['rename'])
def _on_start_output(self, callable_, handler ):
if callable_.qname() in self.opts['ignore']:
handler.write( 'n/a' )
return False
return True
def _on_arguments( self, mis, h, num_args ):
num_default_args = 0
for i, mi in enumerate( mis ):
if self.default_args and mi['default_value']:
h.write(' [')
num_default_args += 1
if i > 0:
h.write(', ')
self._output_type(mi, h)
h.write( mi['argname'] )
h.write(num_default_args * ']')
def _on_return_type( self, cm, h ):
cat = cm.return_type_category()
if cat == 'void':
key = self.is_doc and 'cpp_void_return' or 'cpp_void_return_noexc'
h.write( self.opts[key] )
elif cat in [ 'interface', 'other' ]:
h.write( self.map_type( cm.return_type() ) )
else:
assert not "shouldn't get here"
h.write( ' ' )
def _on_callable_name( self, is_freefun, m, h ):
orig_name = m.gcc.demangled.split( '(' )[0]
try:
name = self.rename_d[orig_name]
except KeyError:
name = m.name()
h.on_name( name )
###########################################################################
class c_signature( c_cpp_base ):
def __init__( self, opts, is_doc=False ):
c_cpp_base.__init__( self, 'c', opts, is_doc, False )
def _on_callable_name( self, is_freefun, m, h ):
if is_freefun:
h.on_name( self.opts['c_freefun_name'](m))
else:
h.on_name( self.opts['c_method_name'](m))
def _on_before_first_method_argument( self, m, h, num_args ):
cls_mi = m.cls_metainfo()
self._output_linked_entity( cls_mi, h, h.write, self.map_type( cls_mi ) )
h.write( ' hobj' )
if num_args:
h.write( ', ' )
def _on_default_argument_interface( self, mi, h ):
assert mi['category'] == 'interface'
h.write( '=0' )
def _on_return_type( self, cm, h ):
cat = cm.return_type_category()
if not self.is_doc:
h.write( self.opts['export_tag'] + ' ' )
if cat == 'void':
h.write( self.opts['c_void_return'] )
elif cat in [ 'interface', 'other' ]:
h.write( self.map_type( cm.return_type() ) )
else:
assert not "shouldn't get here"
if not self.is_doc:
h.write( ' ' + self.opts['callspec'] )
h.write( ' ' )
###########################################################################
class py_signature( sig_base ):
def __init__( self, opts, is_doc=False ):
self.is_doc=is_doc
self.opts=opts
self.default_args=True
def _on_start_output(self, callable_, handler ):
if callable_.qname() in self.opts['ignore']:
handler.write( 'n/a' )
return False
return True
def _on_callable_name( self, is_freefun, m, h ):
orig_name = m.gcc.demangled.split( '(' )[0]
try:
name = m.opts['rename'][orig_name]
except KeyError:
name = m.name()
h.on_name(name)
def _output_linked_entity( self, mi, h, action, *args ):
pass
def _on_return_type( self, mi, h ):
return #do nothing here
id_ = mi.return_type()['gcc'].id()
if not id_:
return
h.on_link_start( id_ )
h.write( 'rettype')
h.on_link_end()
h.write( ' <-- ')
def _on_arguments( self, mis, h, len_args ):
ignore = set()
for tm in self.opts['typemap_lst']:
assert(len(tm)==2)
ti = [i for i in range(len_args) if mis[i]['argname']==tm[0] and i+1<len_args]
for i in ti:
try:
if mis[i+1]['argname']==tm[1]:
ignore.add(i+1)
except ValueError:
pass
args_i=list(set(range(len_args)).difference(ignore))
args_i.sort()
len_args = len(args_i)
closing_brackets = 0
for i in args_i:
if self.default_args and mis[i]['default_value']:
if mis[i]['category'] == 'const_pointer':
assert mis[i]['default_value'] == '0'
h.write( ' [' )
elif mis[i]['category'] == 'interface':
assert mis[i]['default_value'].endswith( '()' ) or mis[i]['default_value'].endswith( '( )' )
h.write( ' [' )
elif mis[i]['category'] == 'enum':
h.write( ' [' )
else:
assert not "unknown default argument type"
closing_brackets += 1
if i!=args_i[0]:
h.write( ', ' )
id_ = mis[i]['gcc'].id()
if id_:
h.on_link_start( id_ )
h.write( mis[i]['argname'] )
if id_:
h.on_link_end()
if closing_brackets:
h.write(closing_brackets * ']')
def main():
global_ns = global_namespace()
docp = global_ns.namespace( "DocPlatform" )
cls = docp.class_( "IStoreManager" )
ns = '::Strs::DocPlatform::'
#round up metainfo for methods
minfo = []
for m in cls.mem_funs( function=lambda m: not m.name.endswith( '_internal' ) ):
assert m.virtuality == 'pure virtual'
assert m.access_type == 'public'
minfo.append( (m, method_metainfo( m, ns ) ) )
#generate signatures for various languages
for sig_type in [cpp_signature, c_signature, py_signature]:
sig = sig_type( ns )
print 10*'-'
for m, mi in minfo:
print sig.get_method( m, mi)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper class for interacting with the Dev Server."""
from __future__ import print_function
import ast
import base64
import distutils.version # pylint: disable=no-name-in-module,import-error
import hashlib
import os
import re
import shutil
import tempfile
import threading
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib.xbuddy import cherrypy_log_util
# Module-local log function.
def _Log(message, *args):
return cherrypy_log_util.LogWithTag('UTIL', message, *args)
_HASH_BLOCK_SIZE = 8192
class CommonUtilError(Exception):
"""Exception classes used by this module."""
def MkDirP(directory):
"""Thread-safely create a directory like mkdir -p.
  If the directory already exists, call chown on the directory and its subfiles
  recursively with the current user and group to make sure the current process
  has full access to the directory.
"""
if os.path.isdir(directory):
# Fix permissions and ownership of the directory and its subfiles by
# calling chown recursively with current user and group.
try:
osutils.Chown(directory, user=os.getuid(), group=os.getgid(),
recursive=True)
except cros_build_lib.RunCommandError as e:
_Log('Could not chown: %s', e)
else:
osutils.SafeMakedirs(directory)
def GetLatestBuildVersion(static_dir, target, milestone=None):
"""Retrieves the latest build version for a given board.
Searches the static_dir for builds for target, and returns the highest
version number currently available locally.
Args:
static_dir: Directory where builds are served from.
target: The build target, typically a combination of the board and the
type of build e.g. x86-mario-release.
milestone: For latest build set to None, for builds only in a specific
milestone set to a str of format Rxx (e.g. R16). Default: None.
Returns:
If latest found, a full build string is returned e.g. R17-1234.0.0-a1-b983.
If no latest is found for some reason or another a '' string is returned.
Raises:
CommonUtilError: If for some reason the latest build cannot be
      determined; this could be due to the dir not existing or no builds
being present after filtering on milestone.
"""
target_path = os.path.join(static_dir, target)
if not os.path.isdir(target_path):
raise CommonUtilError('Cannot find path %s' % target_path)
# pylint: disable=no-member
builds = [distutils.version.LooseVersion(build) for build in
os.listdir(target_path) if not build.endswith('.exception')]
if milestone and builds:
# Check if milestone Rxx is in the string representation of the build.
builds = [build for build in builds if milestone.upper() in str(build)]
if not builds:
raise CommonUtilError('Could not determine build for %s' % target)
return str(max(builds))
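# Selection example for GetLatestBuildVersion (hypothetical directory listing):
# if static_dir/x86-mario-release contains 'R16-1200.0.0-a1-b100' and
# 'R17-1234.0.0-a1-b983', the call returns 'R17-1234.0.0-a1-b983'; passing
# milestone='R16' first narrows the candidates to builds whose version string
# contains 'R16'.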
def PathInDir(directory, path):
"""Returns True if the path is in directory.
Args:
directory: Directory where the path should be in.
path: Path to check.
Returns:
    True if path is in directory, False otherwise.
"""
directory = os.path.realpath(directory)
path = os.path.realpath(path)
return path.startswith(directory) and len(path) != len(directory)
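# Quick sanity examples for PathInDir (paths are hypothetical and assumed free
# of symlinks):
#   PathInDir('/srv/static', '/srv/static/x86-mario-release')  -> True
#   PathInDir('/srv/static', '/srv/static')                    -> False
#   PathInDir('/srv/static', '/etc/passwd')                    -> False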
def GetControlFile(static_dir, build, control_path):
"""Attempts to pull the requested control file from the Dev Server.
Args:
static_dir: Directory where builds are served from.
build: Fully qualified build string; e.g. R17-1234.0.0-a1-b983.
control_path: Path to control file on Dev Server relative to Autotest root.
Returns:
Content of the requested control file.
Raises:
CommonUtilError: If lock can't be acquired.
"""
# Be forgiving if the user passes in the control_path with a leading /
control_path = control_path.lstrip('/')
control_path = os.path.join(static_dir, build, 'autotest',
control_path)
if not PathInDir(static_dir, control_path):
raise CommonUtilError('Invalid control file "%s".' % control_path)
if not os.path.exists(control_path):
# TODO(scottz): Come up with some sort of error mechanism.
# crosbug.com/25040
return 'Unknown control path %s' % control_path
with open(control_path, 'r') as control_file:
return control_file.read()
def GetControlFileListForSuite(static_dir, build, suite_name):
"""List all control files for a specified build, for the given suite.
If the specified suite_name isn't found in the suite to control file
map, this method will return all control files for the build by calling
GetControlFileList.
Args:
static_dir: Directory where builds are served from.
build: Fully qualified build string; e.g. R17-1234.0.0-a1-b983.
suite_name: Name of the suite for which we require control files.
Returns:
String of each control file separated by a newline.
Raises:
CommonUtilError: If the suite_to_control_file_map isn't found in
the specified build's staged directory.
"""
suite_to_control_map = os.path.join(static_dir, build,
'autotest', 'test_suites',
'suite_to_control_file_map')
if not PathInDir(static_dir, suite_to_control_map):
raise CommonUtilError('suite_to_control_map not in "%s".' %
suite_to_control_map)
if not os.path.exists(suite_to_control_map):
raise CommonUtilError('Could not find this file. '
'Is it staged? %s' % suite_to_control_map)
with open(suite_to_control_map, 'r') as fd:
try:
return '\n'.join(ast.literal_eval(fd.read())[suite_name])
except KeyError:
return GetControlFileList(static_dir, build)
def GetControlFileList(static_dir, build):
"""List all control|control. files in the specified board/build path.
Args:
static_dir: Directory where builds are served from.
build: Fully qualified build string; e.g. R17-1234.0.0-a1-b983.
Returns:
String of each file separated by a newline.
Raises:
CommonUtilError: If path is outside of sandbox.
"""
autotest_dir = os.path.join(static_dir, build, 'autotest/')
if not PathInDir(static_dir, autotest_dir):
raise CommonUtilError('Autotest dir not in sandbox "%s".' % autotest_dir)
control_files = set()
if not os.path.exists(autotest_dir):
    raise CommonUtilError('Could not find this directory. '
'Is it staged? %s' % autotest_dir)
for entry in os.walk(autotest_dir):
dir_path, _, files = entry
for file_entry in files:
if file_entry.startswith('control.') or file_entry == 'control':
control_files.add(os.path.join(dir_path,
file_entry).replace(autotest_dir, ''))
return '\n'.join(control_files)
# Hashlib is strange and doesn't actually define these in a sane way that
# pylint can find them. Disable checks for them.
# pylint: disable=E1101,W0106
def GetFileHashes(file_path, do_sha256=False, do_md5=False):
"""Computes and returns a list of requested hashes.
Args:
file_path: path to file to be hashed
do_sha256: whether or not to compute a SHA256 hash
do_md5: whether or not to compute a MD5 hash
Returns:
A dictionary containing binary hash values, keyed by 'sha1', 'sha256' and
'md5', respectively.
"""
hashes = {}
if any((do_sha256, do_md5)):
# Initialize hashers.
hasher_sha256 = hashlib.sha256() if do_sha256 else None
hasher_md5 = hashlib.md5() if do_md5 else None
# Read blocks from file, update hashes.
with open(file_path, 'rb') as fd:
while True:
block = fd.read(_HASH_BLOCK_SIZE)
if not block:
break
hasher_sha256 and hasher_sha256.update(block)
hasher_md5 and hasher_md5.update(block)
# Update return values.
if hasher_sha256:
hashes['sha256'] = hasher_sha256.digest()
if hasher_md5:
hashes['md5'] = hasher_md5.digest()
return hashes
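# Usage sketch for GetFileHashes (the path is hypothetical); both digests are
# raw bytes, so callers typically hex- or base64-encode them, as GetFileSha256
# below does:
#
#   hashes = GetFileHashes('/tmp/image.bin', do_sha256=True, do_md5=True)
#   sha256_bytes, md5_bytes = hashes['sha256'], hashes['md5']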
def GetFileSha256(file_path):
"""Returns the SHA256 checksum of the file given (base64 encoded)."""
return base64.b64encode(
GetFileHashes(file_path, do_sha256=True)['sha256']).decode('utf-8')
def CopyFile(source, dest):
"""Copies a file from |source| to |dest|."""
_Log('Copy File %s -> %s' % (source, dest))
shutil.copy(source, dest)
def SymlinkFile(target, link):
"""Atomically creates or replaces the symlink |link| pointing to |target|.
If the specified |link| file already exists it is replaced with the new link
atomically.
"""
if not os.path.exists(target):
_Log('Could not find target for symlink: %s', target)
return
_Log('Creating symlink: %s --> %s', link, target)
  # Use the created link_base file to prevent other calls to SymlinkFile()
  # from picking the same link_base temp file, thanks to mkstemp().
with tempfile.NamedTemporaryFile(prefix=os.path.basename(link)) as link_fd:
link_base = link_fd.name
# Use the unique link_base filename to create a symlink, but on the same
# directory as the required |link| to ensure the created symlink is in the
# same file system as |link|.
link_name = os.path.join(os.path.dirname(link),
os.path.basename(link_base) + '-link')
# Create the symlink and then rename it to the final position. This ensures
# the symlink creation is atomic.
os.symlink(target, link_name)
os.rename(link_name, link)
class LockDict(object):
"""A dictionary of locks.
This class provides a thread-safe store of threading.Lock objects, which can
be used to regulate access to any set of hashable resources. Usage:
foo_lock_dict = LockDict()
...
with foo_lock_dict.lock('bar'):
# Critical section for 'bar'
"""
def __init__(self):
self._lock = self._new_lock()
self._dict = {}
@staticmethod
def _new_lock():
return threading.Lock()
def lock(self, key):
with self._lock:
lock = self._dict.get(key)
if not lock:
lock = self._new_lock()
self._dict[key] = lock
return lock
# TODO(ahassani, achuith, vapier): Move to cros_build_lib, which has a
# CreateTarball utility function.
def ExtractTarball(tarball_path, install_path, files_to_extract=None,
excluded_files=None, return_extracted_files=False):
"""Extracts a tarball using tar.
Detects whether the tarball is compressed or not based on the file
extension and extracts the tarball into the install_path.
Args:
tarball_path: Path to the tarball to extract.
install_path: Path to extract the tarball to.
files_to_extract: String of specific files in the tarball to extract.
excluded_files: String of files to not extract.
return_extracted_files: whether or not the caller expects the list of
files extracted; if False, returns an empty list.
Returns:
List of absolute paths of the files extracted (possibly empty).
"""
# Deal with exclusions.
# Add 'm' for not extracting file's modified time. All extracted files are
# marked with current system time.
cmd = ['tar', 'xf', tarball_path, '--directory', install_path]
# If caller requires the list of extracted files, get verbose.
if return_extracted_files:
cmd += ['--verbose']
# Determine how to decompress.
tarball = os.path.basename(tarball_path)
if tarball.endswith('.tgz') or tarball.endswith('.tar.gz'):
cmd.append('--gzip')
if excluded_files:
for exclude in excluded_files:
cmd.extend(['--exclude', exclude])
if files_to_extract:
cmd.extend(files_to_extract)
try:
result = cros_build_lib.run(cmd, capture_output=True, encoding='utf-8')
if result.stderr:
      _Log('Error happened while extracting tarball: %s',
result.stderr.rstrip())
if return_extracted_files:
return [os.path.join(install_path, filename)
for filename in result.stdout.splitlines()
if not filename.endswith('/')]
return []
except cros_build_lib.RunCommandError as e:
raise CommonUtilError(
'An error occurred when attempting to untar %s:\n%s' %
(tarball_path, e))
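# Usage sketch for ExtractTarball (paths are hypothetical); with
# return_extracted_files=True the absolute paths of the extracted files are
# returned:
#
#   files = ExtractTarball('/tmp/autotest.tar.gz', '/tmp/build',
#                          return_extracted_files=True)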
def IsInsideChroot():
"""Returns True if we are inside chroot."""
return os.path.exists('/etc/debian_chroot')
def IsRunningOnMoblab():
"""Returns True if this code is running on a chromiumOS DUT."""
try:
return bool(re.search(
r'[_-]+moblab', osutils.ReadFile(constants.LSB_RELEASE_PATH)))
except IOError:
# File doesn't exist.
return False
def IsAnonymousCaller(error):
"""Checks if we're an anonymous caller.
Check if |error| is a GSCommandError due to a lack of credentials.
Args:
error: Exception raised.
Returns:
True if we're an anonymous caller.
"""
if not isinstance(error, gs.GSCommandError):
return False
anon_msg = ('ServiceException: 401 Anonymous caller does not have '
'storage.objects.get access to')
is_anon = error.stderr.find(anon_msg) != -1
_Log('IsAnonymousCaller? %s', is_anon)
return is_anon
|
|
"""
sentry.db.models.fields.node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import collections
import logging
import six
import warnings
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete
from sentry import nodestore
from sentry.utils.cache import memoize
from sentry.utils.compat import pickle
from sentry.utils.strings import decompress, compress
from sentry.utils.canonical import CANONICAL_TYPES, CanonicalKeyDict
from .gzippeddict import GzippedDictField
__all__ = ('NodeField', )
logger = logging.getLogger('sentry')
class NodeUnpopulated(Exception):
pass
class NodeIntegrityFailure(Exception):
pass
class NodeData(collections.MutableMapping):
def __init__(self, field, id, data=None):
self.field = field
self.id = id
self.ref = None
# ref version is used to discredit a previous ref
# (this does not mean the Event is mutable, it just removes ref checking
# in the case of something changing on the data model)
self.ref_version = None
self._node_data = data
def __getstate__(self):
data = dict(self.__dict__)
# downgrade this into a normal dict in case it's a shim dict.
# This is needed as older workers might not know about newer
        # collection types. For instance we have events where this is a
# CanonicalKeyDict
data.pop('data', None)
data['_node_data_CANONICAL'] = isinstance(data['_node_data'], CANONICAL_TYPES)
data['_node_data'] = dict(data['_node_data'].items())
return data
def __setstate__(self, state):
# If there is a legacy pickled version that used to have data as a
# duplicate, reject it.
state.pop('data', None)
if state.pop('_node_data_CANONICAL', False):
state['_node_data'] = CanonicalKeyDict(state['_node_data'])
self.__dict__ = state
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __repr__(self):
cls_name = type(self).__name__
if self._node_data:
            return '<%s: id=%s data=%r>' % (cls_name, self.id, self._node_data)
return '<%s: id=%s>' % (cls_name, self.id, )
def get_ref(self, instance):
ref_func = self.field.ref_func
if not ref_func:
return
return ref_func(instance)
def copy(self):
return self.data.copy()
@memoize
def data(self):
if self._node_data is not None:
return self._node_data
elif self.id:
if settings.DEBUG:
raise NodeUnpopulated('You should populate node data before accessing it.')
else:
warnings.warn('You should populate node data before accessing it.')
self.bind_data(nodestore.get(self.id) or {})
return self._node_data
rv = {}
if self.field.wrapper is not None:
rv = self.field.wrapper(rv)
return rv
def bind_data(self, data, ref=None):
self.ref = data.pop('_ref', ref)
self.ref_version = data.pop('_ref_version', None)
if self.ref_version == self.field.ref_version and ref is not None and self.ref != ref:
raise NodeIntegrityFailure(
'Node reference for %s is invalid: %s != %s' % (self.id, ref, self.ref, )
)
if self.field.wrapper is not None:
data = self.field.wrapper(data)
self._node_data = data
def bind_ref(self, instance):
ref = self.get_ref(instance)
if ref:
self.data['_ref'] = ref
self.data['_ref_version'] = self.field.ref_version
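# Rough illustration of the lazy loading implemented by NodeData.data above
# (the node id and key are hypothetical):
#
#     nd = NodeData(field, 'event:abc123')   # no nodestore access yet
#     nd['message']                          # first access fetches the payload
#                                            # via nodestore.get('event:abc123')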
class NodeField(GzippedDictField):
"""
    Similar to GzippedDictField except that it stores a reference
to an external node.
"""
def __init__(self, *args, **kwargs):
self.ref_func = kwargs.pop('ref_func', None)
self.ref_version = kwargs.pop('ref_version', None)
self.wrapper = kwargs.pop('wrapper', None)
super(NodeField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name):
super(NodeField, self).contribute_to_class(cls, name)
post_delete.connect(self.on_delete, sender=self.model, weak=False)
def on_delete(self, instance, **kwargs):
value = getattr(instance, self.name)
if not value.id:
return
nodestore.delete(value.id)
def to_python(self, value):
if isinstance(value, six.string_types) and value:
try:
value = pickle.loads(decompress(value))
except Exception as e:
logger.exception(e)
value = {}
elif not value:
value = {}
if 'node_id' in value:
node_id = value.pop('node_id')
data = None
else:
node_id = None
data = value
if self.wrapper is not None and data is not None:
data = self.wrapper(data)
return NodeData(self, node_id, data)
def get_prep_value(self, value):
if not value and self.null:
# save ourselves some storage
return None
# We can't put our wrappers into the nodestore, so we need to
# ensure that the data is converted into a plain old dict
data = value.data
if isinstance(data, CANONICAL_TYPES):
data = dict(data.items())
# TODO(dcramer): we should probably do this more intelligently
# and manually
if not value.id:
value.id = nodestore.create(data)
else:
nodestore.set(value.id, data)
return compress(pickle.dumps({'node_id': value.id}))
if hasattr(models, 'SubfieldBase'):
NodeField = six.add_metaclass(models.SubfieldBase)(NodeField)
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^sentry\.db\.models\.fields\.node\.NodeField"])
|
|
import pytest
from ethereum import tester as t
from ethereum import blocks, utils, transactions, vm, abi, opcodes
from ethereum.exceptions import InvalidTransaction
import rlp
from rlp.utils import decode_hex, encode_hex, ascii_chr, str_to_bytes
from ethereum import processblock as pb
import copy
from ethereum.db import EphemDB
from ethereum.utils import to_string, safe_ord, parse_int_or_hex
from ethereum.utils import remove_0x_head, int_to_hex, normalize_address
from ethereum.config import Env
import json
import os
import time
from ethereum import ethash
from ethereum import ethash_utils
db = EphemDB()
db_env = Env(db)
env = {
"currentCoinbase": b"2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty": "256",
"currentGasLimit": "1000000000",
"currentNumber": "257",
"currentTimestamp": "1",
"previousHash": b"5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
}
# from ethereum.slogging import LogRecorder, configure_logging, set_level
# config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace'
# configure_logging(config_string=config_string)
FILL = 1
VERIFY = 2
TIME = 3
VM = 4
STATE = 5
fill_vm_test = lambda params: run_vm_test(params, FILL)
check_vm_test = lambda params: run_vm_test(params, VERIFY)
time_vm_test = lambda params: run_vm_test(params, TIME)
fill_state_test = lambda params: run_state_test(params, FILL)
check_state_test = lambda params: run_state_test(params, VERIFY)
time_state_test = lambda params: run_state_test(params, TIME)
fill_ethash_test = lambda params: run_ethash_test(params, FILL)
check_ethash_test = lambda params: run_ethash_test(params, VERIFY)
time_ethash_test = lambda params: run_ethash_test(params, TIME)
fill_abi_test = lambda params: run_abi_test(params, FILL)
check_abi_test = lambda params: run_abi_test(params, VERIFY)
time_abi_test = lambda params: run_abi_test(params, TIME)
fill_genesis_test = lambda params: run_genesis_test(params, FILL)
check_genesis_test = lambda params: run_genesis_test(params, VERIFY)
time_genesis_test = lambda params: run_genesis_test(params, TIME)
fixture_path = os.path.join(os.path.dirname(__file__), '..', 'fixtures')
def normalize_hex(s):
return s if len(s) > 2 else b'0x00'
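# Behaviour sketch for normalize_hex:
#   normalize_hex(b'0x')   -> b'0x00'  (empty value is padded)
#   normalize_hex(b'0x05') -> b'0x05'  (already long enough, returned as-is)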
def acct_standard_form(a):
return {
"balance": parse_int_or_hex(a["balance"]),
"nonce": parse_int_or_hex(a["nonce"]),
"code": to_string(a["code"]),
"storage": {normalize_hex(k): normalize_hex(v) for
k, v in a["storage"].items() if normalize_hex(v).rstrip(b'0') != b'0x'}
}
def compare_post_states(shouldbe, reallyis):
if shouldbe is None and reallyis is None:
return True
if shouldbe is None or reallyis is None:
raise Exception("Shouldbe: %r \n\nreallyis: %r" % (shouldbe, reallyis))
for k in shouldbe:
if k not in reallyis:
r = {"nonce": 0, "balance": 0, "code": b"0x", "storage": {}}
else:
r = acct_standard_form(reallyis[k])
s = acct_standard_form(shouldbe[k])
if s != r:
raise Exception("Key %r\n\nshouldbe: %r \n\nreallyis: %r" %
(k, s, r))
return True
def callcreate_standard_form(c):
return {
"gasLimit": parse_int_or_hex(c["gasLimit"]),
"value": parse_int_or_hex(c["value"]),
"data": to_string(c["data"])
}
def mktest(code, language, data=None, fun=None, args=None,
gas=1000000, value=0, test_type=VM):
s = t.state(1)
if language == 'evm':
ca = s.contract('x = 5')
s.block.set_code(ca, code)
d = data or b''
else:
c = s.abi_contract(code, language=language)
d = c._translator.encode(fun, args) if fun else (data or b'')
ca = c.address
pre = s.block.to_dict(True)['state']
if test_type == VM:
exek = {"address": ca, "caller": t.a0,
"code": b'0x' + encode_hex(s.block.get_code(ca)),
"data": b'0x' + encode_hex(d), "gas": to_string(gas),
"gasPrice": to_string(1), "origin": t.a0,
"value": to_string(value)}
return fill_vm_test({"env": env, "pre": pre, "exec": exek})
else:
tx = {"data": b'0x' + encode_hex(d), "gasLimit": parse_int_or_hex(gas),
"gasPrice": to_string(1), "nonce": to_string(s.block.get_nonce(t.a0)),
"secretKey": encode_hex(t.k0), "to": ca, "value": to_string(value)}
return fill_state_test({"env": env, "pre": pre, "transaction": tx})
# Fills up a vm test without post data, or runs the test
def run_vm_test(params, mode, profiler=None):
pre = params['pre']
exek = params['exec']
env = params['env']
if 'previousHash' not in env:
env['previousHash'] = encode_hex(db_env.config['GENESIS_PREVHASH'])
assert set(env.keys()) == set(['currentGasLimit', 'currentTimestamp',
'previousHash', 'currentCoinbase',
'currentDifficulty', 'currentNumber'])
# setup env
header = blocks.BlockHeader(
prevhash=decode_hex(env['previousHash']),
number=parse_int_or_hex(env['currentNumber']),
coinbase=decode_hex(env['currentCoinbase']),
difficulty=parse_int_or_hex(env['currentDifficulty']),
gas_limit=parse_int_or_hex(env['currentGasLimit']),
timestamp=parse_int_or_hex(env['currentTimestamp']))
blk = blocks.Block(header, env=db_env)
# setup state
for address, h in list(pre.items()):
assert len(address) == 40
address = decode_hex(address)
assert set(h.keys()) == set(['code', 'nonce', 'balance', 'storage'])
blk.set_nonce(address, parse_int_or_hex(h['nonce']))
blk.set_balance(address, parse_int_or_hex(h['balance']))
blk.set_code(address, decode_hex(h['code'][2:]))
for k, v in h['storage'].items():
blk.set_storage_data(address,
utils.big_endian_to_int(decode_hex(k[2:])),
utils.big_endian_to_int(decode_hex(v[2:])))
# execute transactions
sender = decode_hex(exek['caller']) # a party that originates a call
recvaddr = decode_hex(exek['address'])
nonce = blk._get_acct_item(sender, 'nonce')
gasprice = parse_int_or_hex(exek['gasPrice'])
startgas = parse_int_or_hex(exek['gas'])
value = parse_int_or_hex(exek['value'])
data = decode_hex(exek['data'][2:])
# bypass gas check in tx initialization by temporarily increasing startgas
num_zero_bytes = str_to_bytes(data).count(ascii_chr(0))
num_non_zero_bytes = len(data) - num_zero_bytes
intrinsic_gas = (opcodes.GTXCOST + opcodes.GTXDATAZERO * num_zero_bytes +
opcodes.GTXDATANONZERO * num_non_zero_bytes)
startgas += intrinsic_gas
tx = transactions.Transaction(nonce=nonce, gasprice=gasprice, startgas=startgas,
to=recvaddr, value=value, data=data)
tx.startgas -= intrinsic_gas
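    # Worked example of the intrinsic gas term above (a sketch; constant values
    # assumed from opcodes.py: GTXCOST=21000, GTXDATAZERO=4, GTXDATANONZERO=68):
    # a 3-byte payload b'\x00\x01\x02' has one zero byte and two non-zero bytes,
    # so intrinsic_gas = 21000 + 4*1 + 68*2 = 21140.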
tx.sender = sender
# capture apply_message calls
apply_message_calls = []
orig_apply_msg = pb.apply_msg
ext = pb.VMExt(blk, tx)
def msg_wrapper(msg):
hexdata = encode_hex(msg.data.extract_all())
apply_message_calls.append(dict(gasLimit=to_string(msg.gas),
value=to_string(msg.value),
destination=encode_hex(msg.to),
data=b'0x' + hexdata))
return 1, msg.gas, b''
def create_wrapper(msg):
sender = decode_hex(msg.sender) if \
len(msg.sender) == 40 else msg.sender
nonce = utils.encode_int(ext._block.get_nonce(msg.sender))
addr = utils.sha3(rlp.encode([sender, nonce]))[12:]
hexdata = encode_hex(msg.data.extract_all())
apply_message_calls.append(dict(gasLimit=to_string(msg.gas),
value=to_string(msg.value),
destination=b'', data=b'0x' + hexdata))
return 1, msg.gas, addr
ext.msg = msg_wrapper
ext.create = create_wrapper
def blkhash(n):
if n >= ext.block_number or n < ext.block_number - 256:
return b''
else:
return utils.sha3(to_string(n))
ext.block_hash = blkhash
msg = vm.Message(tx.sender, tx.to, tx.value, tx.startgas,
vm.CallData([safe_ord(x) for x in tx.data]))
code = decode_hex(exek['code'][2:])
time_pre = time.time()
if profiler:
profiler.enable()
success, gas_remained, output = vm.vm_execute(ext, msg, code)
if profiler:
profiler.disable()
pb.apply_msg = orig_apply_msg
blk.commit_state()
for s in blk.suicides:
blk.del_account(s)
time_post = time.time()
"""
generally expected that the test implementer will read env, exec and pre
then check their results against gas, logs, out, post and callcreates.
If an exception is expected, then latter sections are absent in the test.
Since the reverting of the state is not part of the VM tests.
"""
params2 = copy.deepcopy(params)
if success:
params2['callcreates'] = apply_message_calls
params2['out'] = b'0x' + encode_hex(b''.join(map(ascii_chr, output)))
params2['gas'] = to_string(gas_remained)
params2['logs'] = [log.to_dict() for log in blk.logs]
params2['post'] = blk.to_dict(with_state=True)['state']
if mode == FILL:
return params2
elif mode == VERIFY:
if not success:
assert 'post' not in params, 'failed, but expected to succeed'
params1 = copy.deepcopy(params)
shouldbe, reallyis = params1.get('post', None), params2.get('post', None)
compare_post_states(shouldbe, reallyis)
def normalize_value(k, p):
if k in p:
if k == 'gas':
return parse_int_or_hex(p[k])
elif k == 'callcreates':
return list(map(callcreate_standard_form, p[k]))
else:
                    return utils.to_string(p[k])
return None
for k in ['pre', 'exec', 'env', 'callcreates',
'out', 'gas', 'logs']:
shouldbe = normalize_value(k, params1)
reallyis = normalize_value(k, params2)
if shouldbe != reallyis:
raise Exception("Mismatch: " + k + ':\n shouldbe %r\n reallyis %r' %
(shouldbe, reallyis))
elif mode == TIME:
return time_post - time_pre
# Fills up a state test without post data, or runs the test
def run_state_test(params, mode):
pre = params['pre']
exek = params['transaction']
env = params['env']
assert set(env.keys()) == set(['currentGasLimit', 'currentTimestamp',
'previousHash', 'currentCoinbase',
'currentDifficulty', 'currentNumber'])
assert len(env['currentCoinbase']) == 40
# setup env
header = blocks.BlockHeader(
prevhash=decode_hex(env['previousHash']),
number=parse_int_or_hex(env['currentNumber']),
coinbase=decode_hex(env['currentCoinbase']),
difficulty=parse_int_or_hex(env['currentDifficulty']),
timestamp=parse_int_or_hex(env['currentTimestamp']),
# work around https://github.com/ethereum/pyethereum/issues/390 [1]:
gas_limit=min(db_env.config['MAX_GAS_LIMIT'], parse_int_or_hex(env['currentGasLimit'])))
blk = blocks.Block(header, env=db_env)
# work around https://github.com/ethereum/pyethereum/issues/390 [2]:
blk.gas_limit = parse_int_or_hex(env['currentGasLimit'])
# setup state
for address, h in list(pre.items()):
assert len(address) == 40
address = decode_hex(address)
assert set(h.keys()) == set(['code', 'nonce', 'balance', 'storage'])
blk.set_nonce(address, parse_int_or_hex(h['nonce']))
blk.set_balance(address, parse_int_or_hex(h['balance']))
blk.set_code(address, decode_hex(h['code'][2:]))
for k, v in h['storage'].items():
blk.set_storage_data(address,
utils.big_endian_to_int(decode_hex(k[2:])),
utils.big_endian_to_int(decode_hex(v[2:])))
for address, h in list(pre.items()):
address = decode_hex(address)
assert blk.get_nonce(address) == parse_int_or_hex(h['nonce'])
assert blk.get_balance(address) == parse_int_or_hex(h['balance'])
assert blk.get_code(address) == decode_hex(h['code'][2:])
for k, v in h['storage'].items():
assert blk.get_storage_data(address, utils.big_endian_to_int(
decode_hex(k[2:]))) == utils.big_endian_to_int(decode_hex(v[2:]))
# execute transactions
orig_apply_msg = pb.apply_msg
def apply_msg_wrapper(ext, msg):
def blkhash(n):
if n >= blk.number or n < blk.number - 256:
return b''
else:
return utils.sha3(to_string(n))
ext.block_hash = blkhash
return orig_apply_msg(ext, msg)
pb.apply_msg = apply_msg_wrapper
try:
tx = transactions.Transaction(
nonce=parse_int_or_hex(exek['nonce'] or b"0"),
gasprice=parse_int_or_hex(exek['gasPrice'] or b"0"),
startgas=parse_int_or_hex(exek['gasLimit'] or b"0"),
to=normalize_address(exek['to'], allow_blank=True),
value=parse_int_or_hex(exek['value'] or b"0"),
data=decode_hex(remove_0x_head(exek['data'])))
except InvalidTransaction:
tx = None
success, output = False, b''
time_pre = time.time()
time_post = time_pre
else:
if 'secretKey' in exek:
tx.sign(exek['secretKey'])
elif all(key in exek for key in ['v', 'r', 's']):
tx.v = decode_hex(remove_0x_head(exek['v']))
tx.r = decode_hex(remove_0x_head(exek['r']))
tx.s = decode_hex(remove_0x_head(exek['s']))
else:
assert False
time_pre = time.time()
try:
print('trying')
success, output = pb.apply_transaction(blk, tx)
blk.commit_state()
print('success', blk.get_receipts()[-1].gas_used)
except InvalidTransaction:
success, output = False, b''
blk.commit_state()
pass
time_post = time.time()
if tx.to == b'':
output = blk.get_code(output)
pb.apply_msg = orig_apply_msg
params2 = copy.deepcopy(params)
if success:
params2['logs'] = [log.to_dict() for log in blk.get_receipt(0).logs]
params2['out'] = b'0x' + encode_hex(output)
params2['post'] = copy.deepcopy(blk.to_dict(True)['state'])
params2['postStateRoot'] = encode_hex(blk.state.root_hash)
if mode == FILL:
return params2
elif mode == VERIFY:
params1 = copy.deepcopy(params)
shouldbe, reallyis = params1.get('post', None), params2.get('post', None)
compare_post_states(shouldbe, reallyis)
for k in ['pre', 'exec', 'env', 'callcreates',
'out', 'gas', 'logs', 'postStateRoot']:
_shouldbe = params1.get(k, None)
_reallyis = params2.get(k, None)
if str_to_bytes(k) == b'out' and _shouldbe[:1] in ('#', b'#'):
_reallyis = str_to_bytes('#%s' % ((len(_reallyis) - 2) // 2))
if _shouldbe != _reallyis:
print(('Mismatch {key}: shouldbe {shouldbe_key} != reallyis {reallyis_key}.\n'
'post: {shouldbe_post} != {reallyis_post}').format(
shouldbe_key=_shouldbe, reallyis_key=_reallyis,
shouldbe_post=shouldbe, reallyis_post=reallyis, key=k))
raise Exception("Mismatch: " + k + ':\n shouldbe %r\n reallyis %r' %
(_shouldbe, _reallyis))
elif mode == TIME:
return time_post - time_pre
def run_ethash_test(params, mode):
if 'header' not in params:
b = blocks.genesis(db)
b.nonce = decode_hex(params['nonce'])
b.number = params.get('number', 0)
header = b.header
params['header'] = encode_hex(rlp.encode(b.header))
else:
header = blocks.BlockHeader(decode_hex(params['header']))
header_hash = header.mining_hash
cache_size = ethash.get_cache_size(header.number)
full_size = ethash.get_full_size(header.number)
seed = b'\x00' * 32
for i in range(header.number // ethash_utils.EPOCH_LENGTH):
seed = utils.sha3(seed)
nonce = header.nonce
assert len(nonce) == 8
assert len(seed) == 32
t1 = time.time()
cache = ethash.mkcache(cache_size, seed)
t2 = time.time()
cache_hash = encode_hex(utils.sha3(ethash.serialize_cache(cache)))
t6 = time.time()
light_verify = ethash.hashimoto_light(full_size, cache, header_hash, nonce)
t7 = time.time()
# assert full_mine == light_mine
out = {
"seed": encode_hex(seed),
"header_hash": encode_hex(header_hash),
"nonce": encode_hex(nonce),
"cache_size": cache_size,
"full_size": full_size,
"cache_hash": cache_hash,
"mixhash": encode_hex(light_verify["mix digest"]),
"result": encode_hex(light_verify["result"]),
}
if mode == FILL:
        header.mixhash = light_verify["mix digest"]
params["header"] = encode_hex(rlp.encode(header))
for k, v in list(out.items()):
params[k] = v
return params
elif mode == VERIFY:
        should, actual = header.mixhash, light_verify['mix digest']
assert should == actual, "Mismatch: mixhash %r %r" % (should, actual)
for k, v in list(out.items()):
assert params[k] == v, "Mismatch: " + k + ' %r %r' % (params[k], v)
elif mode == TIME:
return {
"cache_gen": t2 - t1,
"verification_time": t7 - t6
}
def run_abi_test(params, mode):
types, args = params['types'], params['args']
out = abi.encode_abi(types, args)
assert abi.decode_abi(types, out) == args
if mode == FILL:
params['result'] = encode_hex(out)
return params
elif mode == VERIFY:
assert params['result'] == encode_hex(out)
elif mode == TIME:
x = time.time()
abi.encode_abi(types, args)
y = time.time()
        abi.decode_abi(types, out)
return {
'encoding': y - x,
'decoding': time.time() - y
}
def run_genesis_test(params, mode):
params = copy.deepcopy(params)
if 'difficulty' not in params:
params['difficulty'] = int_to_hex(2 ** 34)
if 'mixhash' not in params:
params['mixhash'] = '0x' + '0' * 64
if 'nonce' not in params:
params['nonce'] = '0x0000000000000042'
if 'timestamp' not in params:
params['timestamp'] = int_to_hex(5000)
if 'parentHash' not in params:
params['parentHash'] = '0x' + '0' * 64
if 'gasLimit' not in params:
params['gasLimit'] = int_to_hex(5000)
if 'extraData' not in params:
params['extraData'] = '0x'
if 'coinbase' not in params:
params['coinbase'] = '0x' + '3' * 40
x = time.time()
b = blocks.genesis(EphemDB(), start_alloc=params['alloc'],
difficulty=parse_int_or_hex(params['difficulty']),
timestamp=parse_int_or_hex(params['timestamp']),
extra_data=decode_hex(remove_0x_head(params['extraData'])),
gas_limit=parse_int_or_hex(params['gasLimit']),
mixhash=decode_hex(remove_0x_head(params['mixhash'])),
prevhash=decode_hex(remove_0x_head(params['parentHash'])),
coinbase=decode_hex(remove_0x_head(params['coinbase'])),
nonce=decode_hex(remove_0x_head(params['nonce'])))
assert b.difficulty == parse_int_or_hex(params['difficulty'])
assert b.timestamp == parse_int_or_hex(params['timestamp'])
assert b.extra_data == decode_hex(remove_0x_head(params['extraData']))
assert b.gas_limit == parse_int_or_hex(params['gasLimit'])
assert b.mixhash == decode_hex(remove_0x_head(params['mixhash']))
assert b.prevhash == decode_hex(remove_0x_head(params['parentHash']))
assert b.nonce == decode_hex(remove_0x_head(params['nonce']))
print(9)
if mode == FILL:
params['result'] = encode_hex(rlp.encode(b))
return params
elif mode == VERIFY:
assert params['result'] == encode_hex(rlp.encode(b))
elif mode == TIME:
return {
'creation': time.time() - x
}
def get_tests_from_file_or_dir(dname, json_only=False):
if os.path.isfile(dname):
if dname[-5:] == '.json' or not json_only:
with open(dname) as f:
return {dname: json.load(f)}
else:
return {}
else:
o = {}
for f in os.listdir(dname):
fullpath = os.path.join(dname, f)
for k, v in list(get_tests_from_file_or_dir(fullpath, True).items()):
o[k] = v
return o
def get_blocks_from_textdump(data):
if '\n' not in data:
r = rlp.decode(decode_hex(data))
if len(r[0]) != 3:
blocks = [r]
else:
blocks = r
else:
blocks = [rlp.decode(decode_hex(ln)) for ln in data.split('\n')]
return blocks
def fixture_to_bytes(value):
if isinstance(value, str):
return str_to_bytes(value)
elif isinstance(value, list):
return [fixture_to_bytes(v) for v in value]
elif isinstance(value, dict):
ret = {}
for k, v in list(value.items()):
if isinstance(k, str) and (len(k) == 40 or k[:2] == '0x'):
key = str_to_bytes(k)
else:
key = k
ret[key] = fixture_to_bytes(v)
return ret
else:
return value
def get_config_overrides(filename):
override = {}
if os.path.join('BlockchainTests', 'Homestead') in filename:
override['HOMESTEAD_FORK_BLKNUM'] = 0
elif os.path.join('BlockchainTests', 'TestNetwork') in filename:
override['HOMESTEAD_FORK_BLKNUM'] = 5
override['DAO_FORK_BLKNUM'] = 8
override['ANTI_DOS_FORK_BLKNUM'] = 10
elif os.path.join('BlockchainTests', 'EIP150') in filename:
override['HOMESTEAD_FORK_BLKNUM'] = 0
override['ANTI_DOS_FORK_BLKNUM'] = 0
override['DAO_FORK_BLKNUM'] = 2 ** 99 # not applicable
if 'bcTheDaoTest' in filename:
override['DAO_FORK_BLKNUM'] = 8
return override
def generate_test_params(testsource, metafunc, skip_func=None, exclude_func=None):
if ['filename', 'testname', 'testdata'] != metafunc.fixturenames:
return
fixtures = get_tests_from_file_or_dir(
os.path.join(fixture_path, testsource))
base_dir = os.path.dirname(os.path.dirname(__file__))
params = []
for filename, tests in fixtures.items():
if isinstance(tests, dict):
filename = os.path.relpath(filename, base_dir)
for testname, testdata in tests.items():
if exclude_func and exclude_func(filename, testname, testdata):
continue
if skip_func:
skipif = pytest.mark.skipif(
skip_func(filename, testname, testdata),
reason="Excluded"
)
params.append(skipif((filename, testname, testdata)))
else:
params.append((filename, testname, testdata))
metafunc.parametrize(
('filename', 'testname', 'testdata'),
params
)
return params
|
|
#!/usr/bin/python2
import sys
import base64
import StringIO
import xml.etree.ElementTree as ET
from PyQt4 import QtCore, QtGui, QtNetwork
class PackageTypeWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
# state constants
self.Disconnected, self.Pending, self.Connected = 0, 1, 2
# the xml file being modified
self.repodata = None
# the networking handler
self.net = QtNetwork.QNetworkAccessManager(self)
self.net.finished.connect(self.handleResponse)
# the status bar
self.status = self.statusBar()
self.progress = QtGui.QProgressBar(self)
self.progress.hide()
# create the window and populate it
self.win = QtGui.QWidget(self)
self.win.setLayout(self.buildUI())
self.setCentralWidget(self.win)
# set the current state
self.stage = self.Disconnected
# title, size, and show the window
self.setWindowTitle("Artifactory Package Type Migration Tool")
self.resize(800, 600)
self.show()
# return a layout containing the contents of the window
def buildUI(self):
vbox = QtGui.QVBoxLayout()
vbox.addLayout(self.buildUrlBar())
vbox.addLayout(self.buildTypeList())
vbox.addLayout(self.buildButtonBar())
return vbox
# return a layout containing the contents of the url bar
def buildUrlBar(self):
self.urlEntry = QtGui.QLineEdit(self.win)
self.userEntry = QtGui.QLineEdit(self.win)
self.passEntry = QtGui.QLineEdit(self.win)
self.passEntry.setEchoMode(QtGui.QLineEdit.Password)
formbox = QtGui.QFormLayout()
formbox.addRow("URL", self.urlEntry)
formbox.addRow("Username", self.userEntry)
formbox.addRow("Password", self.passEntry)
return formbox
# return a layout containing the repository type list box
def buildTypeList(self):
self.model = RepositoryListModel(self.win)
self.filtermodel = MavenFilterModel(self.win)
self.filtermodel.setSourceModel(self.model)
self.typeList = QtGui.QTreeView(self.win)
self.typeList.setSortingEnabled(True)
self.typeList.setModel(self.filtermodel)
self.typeList.setItemDelegate(ComboBoxDelegate(self.typeList))
self.typeList.setEditTriggers(QtGui.QAbstractItemView.AllEditTriggers)
self.typeList.header().setMovable(False)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.typeList)
return hbox
# return a layout containing the button bar
def buildButtonBar(self):
def toggleFilterCallback(event):
self.filtermodel.mavenOnly = event != 0
self.filtermodel.invalidateFilter()
self.toggleFilter = QtGui.QCheckBox("Maven Only", self.win)
self.toggleFilter.stateChanged.connect(toggleFilterCallback)
self.buttonExport = QtGui.QPushButton("Export", self.win)
tooltip = "Export repositories with non-default layouts"
self.buttonExport.setToolTip(tooltip)
self.buttonExport.clicked.connect(self.exportCallback)
self.buttonConnect = QtGui.QPushButton("Connect", self.win)
self.buttonConnect.clicked.connect(self.connectCallback)
self.buttonQuit = QtGui.QPushButton("Quit", self.win)
self.buttonQuit.clicked.connect(self.quitCallback)
self.buttonSave = QtGui.QPushButton("Save", self.win)
self.buttonSave.clicked.connect(self.saveCallback)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.toggleFilter)
hbox.addStretch(1)
hbox.addWidget(self.buttonExport)
hbox.addWidget(self.buttonConnect)
hbox.addWidget(self.buttonQuit)
hbox.addWidget(self.buttonSave)
return hbox
# given a subpath, return a request object
def getRequest(self, subpath):
url = self.baseurl.resolved(QtCore.QUrl(subpath))
if url.isRelative() or not url.isValid(): return None
request = QtNetwork.QNetworkRequest(url)
request.setRawHeader("Authorization", self.auth)
return request
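    # Resolution sketch for getRequest above (the URL is hypothetical): with a
    # base URL of "http://host:8081/artifactory/" (the trailing slash is
    # enforced in connectCallback), getRequest("api/system/configuration")
    # builds a request for
    # "http://host:8081/artifactory/api/system/configuration" carrying the
    # stored Authorization header.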
# when the 'Export' button is pressed
def exportCallback(self, event):
table = []
# gather every line in the table with a non-default layout
for repo in self.model.table:
line = self.model.table[repo]
if line[3] == None: continue
ptype = self.model.getPackTypes(line[1])[line[2]]
layout = self.model.layoutList[line[3]]
layouts = self.model.deflayouts
if layout not in layouts or (layout in layouts
and ptype not in layouts[layout]):
newlayout = None
# find the default layout for each package type
for lt in layouts:
if ptype in layouts[lt]:
newlayout = lt
break
table.append((repo, ptype, layout, newlayout))
# if the entire table has default layouts, don't do anything
if len(table) <= 0:
self.status.showMessage("Nothing to export", 3000)
return
# get a file to export to
title = "Export Repositories with Non-Default Layouts"
fname = QtGui.QFileDialog.getSaveFileName(self.win, title)
# save to the file
try:
with open(fname, 'w') as f:
for line in table:
ln = line[0] + " (" + line[1] + "): layout is "
ln += line[2] + ", default is " + line[3] + "\n"
f.write(ln)
self.status.showMessage("Exported successfully", 3000)
except IOError as ex: self.status.showMessage(str(ex), 3000)
# when the 'Connect' button is pressed
def connectCallback(self, event):
# if we are disconnected, connect
if self.stage == self.Disconnected:
# set the base url and auth data from the form fields
self.baseurl = QtCore.QUrl(self.urlEntry.text())
if not self.baseurl.path().endsWith("/"):
self.baseurl.setPath(self.baseurl.path() + "/")
userpass = self.userEntry.text() + ":" + self.passEntry.text()
self.auth = "Basic " + base64.b64encode(unicode(userpass), "utf-8")
# get a request for the config descriptor file
request = self.getRequest("api/system/configuration")
if request == None:
self.status.showMessage("Error: Invalid URL", 3000)
return
# set the state appropriately
self.stage = self.Pending
msg = "Querying '" + request.url().toString() + "' ..."
self.status.showMessage(msg)
# send a get request
response = self.net.get(request)
response.error.connect(self.handleNetError)
response.downloadProgress.connect(self.handleProgress)
# if we are connected, disconnect
elif self.stage == self.Connected:
# get a list of changes that have been made to the repository data
diff = self.model.diffTable()
# if changes were made, ask to discard
if len(diff) > 0:
txt = "Unsaved changes will be discarded. Really disconnect?"
msg = QtGui.QMessageBox(self.win)
msg.setWindowTitle("Warning: Unsaved Changes")
msg.setText("Warning: Unsaved Changes")
msg.setInformativeText(txt)
msg.setIcon(QtGui.QMessageBox.Warning)
msg.addButton(QtGui.QMessageBox.Ok)
msg.addButton(QtGui.QMessageBox.Cancel)
if msg.exec_() == QtGui.QMessageBox.Cancel: return
# reset everything and set the state appropriately
self.repodata = None
self.model.table = {}
self.stage = self.Disconnected
self.status.showMessage("Disconnected successfully", 3000)
# when the 'Save' button is pressed
def saveCallback(self, event):
# get a list of changes that have been made to the repository data
diff = self.model.diffTable()
# if no changes were made, do nothing more
if len(diff) < 1:
self.status.showMessage("Nothing to do", 3000)
return
# ask to overwrite Artifactory configuration
txt = "Any changes made to the Artifactory system configuration while"
txt += " this tool has been connected will be overwritten! Save anyway?"
msg = QtGui.QMessageBox(self.win)
msg.setWindowTitle("Warning")
msg.setText("Warning")
msg.setInformativeText(txt)
msg.setIcon(QtGui.QMessageBox.Warning)
msg.addButton(QtGui.QMessageBox.Ok)
msg.addButton(QtGui.QMessageBox.Cancel)
if msg.exec_() == QtGui.QMessageBox.Cancel: return
# get a request object to send the new config descriptor file
request = self.getRequest("api/system/configuration")
request.setRawHeader("Content-Type", "application/xml")
# set the state appropriately
self.stage = self.Pending
msg = "Querying '" + request.url().toString() + "' ..."
self.status.showMessage(msg)
# get the xml data ready to modify
root = self.repodata.getroot()
ns = root.tag[:root.tag.index('}') + 1]
layouts = self.model.layoutList
# find the repos that are listed to have been modified
for name in "local", "remote", "virtual":
types = self.model.getPackTypes(name)
iterator = None
try:
iterator = root.iter(ns + name + "Repository")
except AttributeError:
                # Element.iter() is new in Python 2.7; fall back to the deprecated getiterator()
iterator = root.getiterator(ns + name + "Repository")
for repo in iterator:
key = repo.find(ns + "key")
if key == None or key.text not in diff: continue
# set the type and layout of each modified repo
ptype = repo.find(ns + "type")
layout = repo.find(ns + "repoLayoutRef")
if ptype != None: ptype.text = types[diff[key.text][2]]
if layout != None: layout.text = layouts[diff[key.text][3]]
# create a file-like object and write the xml to it
fobj = StringIO.StringIO()
self.repodata.write(fobj)
# send the resulting modified xml to the server
response = self.net.post(request, fobj.getvalue())
response.error.connect(self.handleNetError)
response.uploadProgress.connect(self.handleProgress)
fobj.close()
# when the 'Quit' button is pressed
def quitCallback(self, event):
# get a list of changes that have been made to the repository data
diff = self.model.diffTable()
# if changes were made, ask to discard
if self.stage == self.Connected and len(diff) > 0:
txt = "Unsaved changes will be discarded. Really quit?"
msg = QtGui.QMessageBox(self.win)
msg.setWindowTitle("Warning: Unsaved Changes")
msg.setText("Warning: Unsaved Changes")
msg.setInformativeText(txt)
msg.setIcon(QtGui.QMessageBox.Warning)
msg.addButton(QtGui.QMessageBox.Ok)
msg.addButton(QtGui.QMessageBox.Cancel)
if msg.exec_() == QtGui.QMessageBox.Cancel: return
# quit the tool
QtCore.QCoreApplication.instance().quit()
# extract the table data from the xml file
def extractXmlData(self):
layouts, data = [], {}
# get the xml data ready to read
root = self.repodata.getroot()
ns = root.tag[:root.tag.index('}') + 1]
# extract all the layout names from the file
iterator = None
try:
iterator = root.iter(ns + "repoLayout")
except AttributeError:
            # Element.iter() is new in Python 2.7; fall back to the deprecated getiterator()
iterator = root.getiterator(ns + "repoLayout")
for layout in iterator:
name = layout.find(ns + "name")
if name != None: layouts.append(name.text)
layouts.sort()
# iterate over all the repositories
for name in "local", "remote", "virtual":
types = self.model.getPackTypes(name)
iterator = None
try:
iterator = root.iter(ns + name + "Repository")
except AttributeError:
                # Element.iter() is new in Python 2.7; fall back to the deprecated getiterator()
iterator = root.getiterator(ns + name + "Repository")
for repo in iterator:
# extract the data from each repo entry
key = repo.find(ns + "key")
ptype = repo.find(ns + "type")
descr = repo.find(ns + "description")
layout = repo.find(ns + "repoLayoutRef")
if key == None or ptype == None: return None
if ptype.text not in types: return None
# get the description, if one exists
rdescr = None
if descr != None and descr.text != None and len(descr.text) > 0:
rdescr = descr.text
# convert the type and layout strings to their index values
rlayout = None
if layout != None and layout.text in layouts:
rlayout = layouts.index(layout.text)
rtype = types.index(ptype.text)
elem = (rdescr, name, rtype, rlayout)
# save the final data set to the dict
data[key.text] = elem
# if no repositories were found, there may have been a problem
if len(data) < 1: return None
# save the list of layouts, and return the complete dict
self.model.layoutList = layouts
return data
    # when a response containing the config descriptor is received
def getConfigCallback(self, reply):
try:
err = None
xmlObj = None
# if there was a network error, fail and print
if reply.error() != QtNetwork.QNetworkReply.NoError:
err = self.printNetworkError(reply)
else:
try:
# create a file-like object and read the xml from it
fobj = StringIO.StringIO(str(reply.readAll()))
xmlObj = ET.parse(fobj)
fobj.close()
except IOError:
xmlObj = None
if xmlObj == None:
err = "Error: Invalid xml resource"
if err == None:
# if everything went fine, close the connection
reply.close()
self.repodata = xmlObj
# extract the data from the xml object
extractedData = self.extractXmlData()
if extractedData != None:
# if the extraction succeeded, display the new data
self.model.table = extractedData
self.stage = self.Connected
rtc = QtGui.QHeaderView.ResizeToContents
self.typeList.header().resizeSections(rtc)
self.status.showMessage("Connected successfully", 3000)
else:
# if the extraction failed, disconnect
self.repodata = None
err = "Error: Invalid or empty Artifactory config"
self.status.showMessage(err, 3000)
self.stage = self.Disconnected
else:
# abort and disconnect, something went wrong
self.repodata = None
reply.abort()
if err == None: err = "Error: Response not valid"
self.status.showMessage(err, 3000)
self.stage = self.Disconnected
except:
self.repodata = None
reply.abort()
self.stage = self.Disconnected
raise
    # when a response from the config descriptor update is received
def postConfigCallback(self, reply):
try:
err = None
if reply.error() != QtNetwork.QNetworkReply.NoError:
# if there was a network error, fail and print
err = self.printNetworkError(reply)
reply.abort()
self.status.showMessage(err, 3000)
self.stage = self.Connected
else:
# otherwise, print success
err = "Configuration updated successfully"
reply.close()
self.status.showMessage(err, 3000)
# send a get request for the config descriptor file again
request = self.getRequest("api/system/configuration")
response = self.net.get(request)
response.error.connect(self.handleNetError)
response.downloadProgress.connect(self.handleProgress)
except:
reply.abort()
self.stage = self.Connected
raise
    # when an http response is received
def handleResponse(self, reply):
# if the response calls for a redirect, do it
attr = QtNetwork.QNetworkRequest.RedirectionTargetAttribute
redirect = reply.attribute(attr).toUrl()
if redirect.isValid():
request = reply.request()
request.setUrl(reply.url().resolved(redirect))
response = self.net.createRequest(reply.operation(), request)
response.error.connect(self.handleNetError)
post = QtNetwork.QNetworkAccessManager.PostOperation
if reply.operation() == post:
response.uploadProgress.connect(self.handleProgress)
else:
response.downloadProgress.connect(self.handleProgress)
elif reply.operation() == QtNetwork.QNetworkAccessManager.GetOperation:
# if it's a response to a get, we're getting the descriptor
self.getConfigCallback(reply)
elif reply.operation() == QtNetwork.QNetworkAccessManager.PostOperation:
# if it's a response to a post, we're posting the descriptor
self.postConfigCallback(reply)
else: raise RuntimeError("Unanticipated HTTP response")
# extract the network error information and print it to the status bar
def printNetworkError(self, reply):
statusAttr = QtNetwork.QNetworkRequest.HttpStatusCodeAttribute
messageAttr = QtNetwork.QNetworkRequest.HttpReasonPhraseAttribute
status = reply.attribute(statusAttr).toString()
message = reply.attribute(messageAttr).toString()
return "Network Error: " + status + " '" + message + "'"
    # if a network error occurs, raise it so it surfaces somewhere visible
def handleNetError(self, error):
raise RuntimeError("Network Error: " + str(error))
# when progress is made in the download/upload, update the progress bar
def handleProgress(self, recvd, total):
if total < 0:
if self.progress.maximum() != 0: self.progress.setMaximum(0)
        else:
            if self.progress.maximum() != 100: self.progress.setMaximum(100)
            # guard against a zero total (e.g. an empty reply) to avoid dividing by zero
            if total > 0: self.progress.setValue((100*recvd)/total)
# stage getter
def getStage(self):
return self._stage
# stage setter, also update ui appropriately
def setStage(self, stage):
if stage == self.Disconnected:
self._stage = self.Disconnected
self.urlEntry.setEnabled(True)
self.userEntry.setEnabled(True)
self.passEntry.setEnabled(True)
self.typeList.setEnabled(False)
self.typeList.setHeaderHidden(True)
self.buttonExport.setEnabled(False)
self.buttonConnect.setText("Connect")
self.buttonConnect.setEnabled(True)
self.buttonSave.setEnabled(False)
self.status.removeWidget(self.progress)
elif stage == self.Pending:
self._stage = self.Pending
self.urlEntry.setEnabled(False)
self.userEntry.setEnabled(False)
self.passEntry.setEnabled(False)
self.typeList.setEnabled(False)
self.typeList.setHeaderHidden(True)
self.buttonExport.setEnabled(False)
self.buttonConnect.setEnabled(False)
self.buttonConnect.setText("Working ...")
self.buttonSave.setEnabled(False)
self.progress.reset()
self.progress.setRange(0, 0)
self.status.addPermanentWidget(self.progress)
self.progress.show()
elif stage == self.Connected:
self._stage = self.Connected
self.urlEntry.setEnabled(False)
self.userEntry.setEnabled(False)
self.passEntry.setEnabled(False)
self.typeList.setEnabled(True)
self.typeList.setHeaderHidden(False)
self.buttonExport.setEnabled(True)
self.buttonConnect.setText("Disconnect")
self.buttonConnect.setEnabled(True)
self.buttonSave.setEnabled(True)
self.status.removeWidget(self.progress)
else: raise RuntimeError("Improper stage state")
# connect the stage property to its getter and setter
stage = property(getStage, setStage)
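# Note on the stage state machine driven by setStage above (a descriptive
# summary of the callbacks, not enforced anywhere in the code):
#   Disconnected -> Pending      when the config descriptor GET is sent
#   Pending      -> Connected    once the descriptor is fetched and parsed
#                                (or after a failed save, which keeps the session open)
#   Pending      -> Disconnected when the initial fetch or parse fails
#   Connected    -> Pending      while a save (POST) is in flight
#   Connected    -> Disconnected when the user disconnects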
# allows the QTreeView to display data properly
class RepositoryListModel(QtCore.QAbstractItemModel):
def __init__(self, parent = None):
QtCore.QAbstractItemModel.__init__(self, parent)
# get the default size hint from a combo box
# this is used to display rows of the proper height
self.comboSize = QtGui.QComboBox().sizeHint()
# list of default layout and package type pairs
self.deflayouts = {
"bower-default": ["bower"], "gradle-default": ["gradle"],
"ivy-default": ["ivy"], "maven-1-default": ["maven"],
"maven-2-default": ["maven"], "npm-default": ["npm"],
"nuget-default": ["nuget"], "sbt-default": ["sbt"],
"vcs-default": ["vcs"], "simple-default": [
"generic", "debian", "docker", "gems", "gitlfs",
"pypi", "vagrant", "yum", "p2"]}
# _table contains the currently displayed data
self._table = {}
# orig contains the original data, so we can diff them later
self.orig = {}
# keys allows us to convert between table keys and indexes
self.keys = []
# overload: get a QModelIndex from a row, col, and parent
def index(self, row, column, parent = QtCore.QModelIndex()):
if parent.isValid(): return QtCore.QModelIndex()
return self.createIndex(row, column, self.keys[row])
# overload: get the parent index given a child index
# this is always the root index, since it's just one layer
def parent(self, child):
return QtCore.QModelIndex()
# overload: get the row count
def rowCount(self, parent = QtCore.QModelIndex()):
if parent.isValid(): return 0
return len(self._table)
# overload: get the column count
def columnCount(self, parent = QtCore.QModelIndex()):
return 4
# overload: get the value of an index in a given role
def data(self, index, role = QtCore.Qt.DisplayRole):
if not index.isValid(): return None
col, ptr = index.column(), index.internalPointer()
# text to display in the table
# ptr is the key, so show that if we're in the key column
# otherwise, get the appropriate string given the index
if role == QtCore.Qt.DisplayRole:
if col == 0: return ptr
elif col == 1: return self._table[ptr][1]
else:
lst = None
if col == 2: lst = self.getPackTypes(self._table[ptr][1])
elif col == 3: lst = self.layoutList
val = self._table[ptr][col]
return "N/A" if val == None else lst[val]
# the N/A string in the virtual layout field should be grey
elif role == QtCore.Qt.ForegroundRole and col > 1:
if self._table[ptr][col] != None: return None
return QtGui.QBrush(QtCore.Qt.gray)
# index of the combobox option to display
elif role == QtCore.Qt.EditRole and col > 1:
return self._table[ptr][col]
# display an icon when the layout doesn't match the package type
elif role == QtCore.Qt.DecorationRole and col == 3:
if self._table[ptr][3] == None: return 0
ptype = self.getPackTypes(self._table[ptr][1])[self._table[ptr][2]]
layout = self.layoutList[self._table[ptr][3]]
if layout not in self.deflayouts or (layout in self.deflayouts
and ptype not in self.deflayouts[layout]):
icon = QtGui.QStyle.SP_MessageBoxInformation
return QtGui.qApp.style().standardIcon(icon)
return 0
# show a tooltip when the layout doesn't match the package type
elif role == QtCore.Qt.ToolTipRole and col == 3:
if self._table[ptr][3] == None: return None
ptype = self.getPackTypes(self._table[ptr][1])[self._table[ptr][2]]
layout = self.layoutList[self._table[ptr][3]]
if layout not in self.deflayouts or (layout in self.deflayouts
and ptype not in self.deflayouts[layout]):
return "This layout is not the default for this package type."
return None
# show a tooltip containing the repository description
elif role == QtCore.Qt.ToolTipRole and col == 0:
if self._table[ptr][0] == None: return None
return self._table[ptr][0]
# return the combobox size hint, so the rows will be big enough
elif role == QtCore.Qt.SizeHintRole and col == self.columnCount() - 1:
return self.comboSize
return None
# overload: return the displayed header string of a given column index
def headerData(self, section, orientation, role = QtCore.Qt.DisplayRole):
if orientation != QtCore.Qt.Horizontal or role != QtCore.Qt.DisplayRole:
return None
hlist = ["Repository Key", "Repository Type", "Package Type", "Layout"]
return hlist[section]
# overload: set a given index to a specified value internally
def setData(self, index, value, role = QtCore.Qt.EditRole):
col, ptr = index.column(), index.internalPointer()
if role != QtCore.Qt.EditRole or col <= 1: return False
# convert the row to a list (tuples are immutable)
data = list(self._table[ptr])
# set the proper element
data[col], _ = value.toInt()
# convert back to a tuple and save the row
self._table[ptr] = tuple(data)
return True
# overload: return flags for a given index
def flags(self, index):
val = self._table[index.internalPointer()][index.column()]
# key names are not editable
# null values are also not editable
if index.column() > 1 and val != None:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable
return QtCore.Qt.ItemIsEnabled
# given an index, return a list of available package types
def getPackTypesList(self, index):
if not index.isValid(): return None
ptr = index.internalPointer()
return self.getPackTypes(self._table[ptr][1])
# given a repo type, return a list of available package types
def getPackTypes(self, repoType, packTypesMap = {}):
# memoize these, they aren't going to change
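        # (the mutable default argument packTypesMap acts as a cache shared by every call)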
if repoType in packTypesMap: return packTypesMap[repoType]
# all the types that any repo can have
types = ["bower", "docker", "gems", "gradle", "ivy", "maven", "npm",
"nuget", "pypi", "sbt"]
# all the types that local repos can have
if repoType == "local":
types.extend(["debian", "gitlfs", "vagrant", "yum"])
# all the types that remote repos can have
elif repoType == "remote":
types.extend(["debian", "p2", "vcs", "yum"])
# all the types that virtual repos can have
elif repoType == "virtual":
types.extend(["p2"])
else: raise ValueError("Incorrect package type: " + repoType)
# sort them alphabetically and append generic to the end
# this is how they are in the Artifactory ui
types.sort()
types.append("generic")
packTypesMap[repoType] = types
return types
# list all the rows that have been modified
def diffTable(self):
diff = {}
for key in self.keys:
if self._table[key] != self.orig[key]: diff[key] = self._table[key]
return diff
# table getter
def getTable(self):
return self._table
# table setter, also set orig and keys appropriately
# also send reset signals so the QTreeView updates properly
def setTable(self, tab):
try:
self.beginResetModel()
self._table = tab.copy()
self.orig = tab.copy()
self.keys = tab.keys()
self.endResetModel()
except AttributeError:
# Fix for PyQt < 4.6
self._table = tab.copy()
self.orig = tab.copy()
self.keys = tab.keys()
self.reset()
# connect the table property to its getter and setter
table = property(getTable, setTable)
# allows for filtering of non-maven repositories from the list
class MavenFilterModel(QtGui.QSortFilterProxyModel):
def __init__(self, parent = None):
QtGui.QSortFilterProxyModel.__init__(self, parent)
self.source = None
# whether to filter non-maven repos from the list
self.mavenOnly = False
# overload: get the source model so we can access it later
def setSourceModel(self, model):
QtGui.QSortFilterProxyModel.setSourceModel(self, model)
self.source = model
# overload: filter all the non-maven rows, only when the filter is enabled
def filterAcceptsRow(self, row, parent):
if not self.mavenOnly: return True
rowdata = self.source.table[self.source.keys[row]]
mvnidx = self.source.getPackTypes(rowdata[1]).index("maven")
return rowdata[2] == mvnidx
# allow the delegate to access the getPackTypesList function
def getPackTypesList(self, index):
return self.source.getPackTypesList(self.mapToSource(index))
# allow the delegate to access the layoutList property
def getLayoutList(self):
return self.source.layoutList
layoutList = property(getLayoutList)
# allows data to be modified properly in the QTreeView
class ComboBoxDelegate(QtGui.QStyledItemDelegate):
def __init__(self, parent = None):
QtGui.QStyledItemDelegate.__init__(self, parent)
# overload: create an editor widget for a given field
def createEditor(self, parent, option, index):
col, model = index.column(), index.model()
if col <= 1:
par = QtGui.QStyledItemDelegate
return par.createEditor(self, parent, option, index)
# all of these are editable with a combobox
widget = QtGui.QComboBox(parent)
widget.setAutoFillBackground(True)
widget.setFocusPolicy(QtCore.Qt.StrongFocus)
# comboboxes need lists of items to display
lst = None
if col == 2: lst = model.getPackTypesList(index)
elif col == 3: lst = model.layoutList
widget.addItems(lst)
return widget
# overload: set an editor widget to display the data in a given index
def setEditorData(self, editor, index):
if index.column() <= 1: return
data, _ = index.data(QtCore.Qt.EditRole).toInt()
editor.setCurrentIndex(data)
# overload: modify the internal data to reflect the editor widget state
def setModelData(self, editor, model, index):
if index.column() <= 1: return
data = QtCore.QVariant(editor.currentIndex())
model.setData(index, data)
if index.column() == 2 and editor.currentText() != "maven":
try: model.invalidateFilter()
except AttributeError: pass
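# How the model, filter proxy, and delegate above are typically wired into the
# tree view (a sketch only; the actual setup happens when PackageTypeWindow
# builds its widgets):
#
#   model = RepositoryListModel()
#   filtermodel = MavenFilterModel()
#   filtermodel.setSourceModel(model)
#   typeList = QtGui.QTreeView()
#   typeList.setModel(filtermodel)
#   typeList.setItemDelegate(ComboBoxDelegate(typeList))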
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = PackageTypeWindow()
sys.exit(app.exec_())
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper library for visualizations.
TODO(googleuser): Find a more reliable way to serve stuff from IPython
notebooks (e.g. determining where the root notebook directory is).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import uuid
from google.protobuf import json_format
from dragnn.protos import trace_pb2
# Make a guess about where the IPython kernel root is.
_IPYTHON_KERNEL_PATH = os.path.realpath(os.getcwd())
# Bazel uses the 'data' attribute for this library to ensure viz.min.js.gz is
# packaged.
module_path = os.path.dirname(os.path.abspath(__file__))
viz_script = os.path.join(os.path.dirname(module_path), 'viz', 'viz.min.js.gz')
def _load_viz_script():
"""Reads the bundled visualization script.
Raises:
EnvironmentError: If the visualization script could not be found.
Returns:
str JavaScript source code.
"""
if not os.path.isfile(viz_script):
raise EnvironmentError(
'Visualization script should be built into {}'.format(viz_script))
with gzip.GzipFile(viz_script) as f:
return f.read()
def parse_trace_json(trace):
"""Converts a binary-encoded MasterTrace proto to a JSON parser trace.
Args:
trace: Binary string containing a MasterTrace.
Returns:
JSON str, as expected by visualization tools.
"""
as_proto = trace_pb2.MasterTrace.FromString(trace)
# Sanitize non-UTF8 captions. One case where this occurs is for byte LSTMs,
# which may be processing a sub-sequence of a UTF-8 multi-byte sequence.
for component_trace in as_proto.component_trace:
for step_trace in component_trace.step_trace:
if isinstance(step_trace.caption, str):
try:
unicode(step_trace.caption, 'utf-8')
except UnicodeDecodeError:
step_trace.caption = repr(step_trace.caption) # Safe encoding.
as_json = json_format.MessageToJson(
as_proto, preserving_proto_field_name=True)
return as_json
def _optional_master_spec_json(master_spec):
"""Helper function to return 'null' or a master spec JSON string."""
if master_spec is None:
return 'null'
else:
return json_format.MessageToJson(
master_spec, preserving_proto_field_name=True)
def _container_div(height='700px', contents=''):
elt_id = str(uuid.uuid4())
html = """
<div id="{elt_id}" style="width: 100%; min-width: 200px; height: {height};">
{contents}</div>
""".format(
elt_id=elt_id, height=height, contents=contents)
return elt_id, html
def trace_html(trace,
convert_to_unicode=True,
height='700px',
script=None,
master_spec=None):
"""Generates HTML that will render a master trace.
This will result in a self-contained "div" element.
Args:
trace: binary-encoded MasterTrace string.
convert_to_unicode: Whether to convert the output to unicode. Defaults to
True because IPython.display.HTML expects unicode, and we expect users to
often pass the output of this function to IPython.display.HTML.
height: CSS string representing the height of the element, default '700px'.
script: Visualization script contents, if the defaults are unacceptable.
master_spec: Master spec proto (parsed), which can improve the layout. May
be required in future versions.
Returns:
unicode or str with HTML contents.
"""
if script is None:
script = _load_viz_script()
json_trace = parse_trace_json(trace)
elt_id, div_html = _container_div(height=height)
as_str = """
<meta charset="utf-8"/>
{div_html}
<script type='text/javascript'>
{script}
visualizeToDiv({json}, "{elt_id}", {master_spec_json});
</script>
""".format(
script=script,
json=json_trace,
master_spec_json=_optional_master_spec_json(master_spec),
elt_id=elt_id,
div_html=div_html)
return unicode(as_str, 'utf-8') if convert_to_unicode else as_str
def open_in_new_window(html, notebook_html_fcn=None, temp_file_basename=None):
"""Opens an HTML visualization in a new window.
This function assumes that the module was loaded when the current working
directory is the IPython/Jupyter notebook root directory. Then it writes a
file ./tmp/_new_window_html/<random-uuid>.html, and returns an HTML display
element, which will call `window.open("/files/<filename>")`. This works
because IPython serves files from the /files root.
Args:
html: HTML to write to a file.
notebook_html_fcn: Function to generate an HTML element; defaults to
IPython.display.HTML (lazily imported).
temp_file_basename: File name to write (defaults to <random-uuid>.html).
Returns:
HTML notebook element, which will trigger the browser to open a new window.
"""
if isinstance(html, unicode):
html = html.encode('utf-8')
if notebook_html_fcn is None:
from IPython import display
notebook_html_fcn = display.HTML
if temp_file_basename is None:
temp_file_basename = '{}.html'.format(str(uuid.uuid4()))
rel_path = os.path.join('tmp', '_new_window_html', temp_file_basename)
abs_path = os.path.join(_IPYTHON_KERNEL_PATH, rel_path)
# Write the file, creating the directory if it doesn't exist.
if not os.path.isdir(os.path.dirname(abs_path)):
os.makedirs(os.path.dirname(abs_path))
with open(abs_path, 'w') as f:
f.write(html)
return notebook_html_fcn("""
<script type='text/javascript'>
window.open("/files/{}");
</script>
""".format(rel_path))
class InteractiveVisualization(object):
"""Helper class for displaying visualizations interactively.
See usage in examples/dragnn/interactive_text_analyzer.ipynb.
"""
def initial_html(self, height='700px', script=None, init_message=None):
"""Returns HTML for a container, which will be populated later.
Args:
height: CSS string representing the height of the element, default
'700px'.
script: Visualization script contents, if the defaults are unacceptable.
init_message: Initial message to display.
Returns:
unicode with HTML contents.
"""
if script is None:
script = _load_viz_script()
if init_message is None:
init_message = 'Type a sentence and press (enter) to see the trace.'
self.elt_id, div_html = _container_div(
height=height, contents='<strong>{}</strong>'.format(init_message))
html = """
<meta charset="utf-8"/>
{div_html}
<script type='text/javascript'>
{script}
</script>
""".format(
script=script, div_html=div_html)
return unicode(html, 'utf-8') # IPython expects unicode.
def show_trace(self, trace, master_spec=None):
"""Returns a JS script HTML fragment, which will populate the container.
Args:
trace: binary-encoded MasterTrace string.
master_spec: Master spec proto (parsed), which can improve the layout. May
be required in future versions.
Returns:
unicode with HTML contents.
"""
html = """
<meta charset="utf-8"/>
<script type='text/javascript'>
document.getElementById("{elt_id}").innerHTML = ""; // Clear previous.
visualizeToDiv({json}, "{elt_id}", {master_spec_json});
</script>
""".format(
json=parse_trace_json(trace),
master_spec_json=_optional_master_spec_json(master_spec),
elt_id=self.elt_id)
return unicode(html, 'utf-8') # IPython expects unicode.
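# A minimal usage sketch (not part of this module; the import path and the
# existence of a serialized MasterTrace string `trace_bytes` are assumptions):
#
#   from IPython import display
#   from dragnn.python import visualization  # module path is an assumption
#
#   html = visualization.trace_html(trace_bytes)
#   display.display(display.HTML(html))                       # render inline in a notebook
#   display.display(visualization.open_in_new_window(html))   # or pop out a new window
#
#   viz = visualization.InteractiveVisualization()
#   display.display(display.HTML(viz.initial_html()))
#   display.display(display.HTML(viz.show_trace(trace_bytes)))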
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Representation of type information for DBCore model fields.
"""
from __future__ import division, absolute_import, print_function
from . import query
from beets.util import str2bool
import six
if not six.PY2:
    buffer = memoryview  # Python 3 has no buffer built-in; Python 2 keeps buffer because its sqlite module rejects memoryview
# Abstract base.
class Type(object):
"""An object encapsulating the type of a model field. Includes
information about how to store, query, format, and parse a given
field.
"""
sql = u'TEXT'
"""The SQLite column type for the value.
"""
query = query.SubstringQuery
"""The `Query` subclass to be used when querying the field.
"""
model_type = six.text_type
"""The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field
is accessed. To this end, the constructor is used by the `normalize`
    and `from_sql` methods and the `null` property.
"""
@property
def null(self):
"""The value to be exposed when the underlying value is None.
"""
return self.model_type()
def format(self, value):
"""Given a value of this type, produce a Unicode string
representing the value. This is used in template evaluation.
"""
if value is None:
value = self.null
# `self.null` might be `None`
if value is None:
value = u''
if isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
return six.text_type(value)
def parse(self, string):
"""Parse a (possibly human-written) string and return the
indicated value of this type.
"""
try:
return self.model_type(string)
except ValueError:
return self.null
def normalize(self, value):
"""Given a value that will be assigned into a field of this
type, normalize the value to have the appropriate type. This
base implementation only reinterprets `None`.
"""
if value is None:
return self.null
else:
# TODO This should eventually be replaced by
# `self.model_type(value)`
return value
def from_sql(self, sql_value):
"""Receives the value stored in the SQL backend and return the
value to be stored in the model.
        For fixed fields the type of `sql_value` is determined by the column
type affinity given in the `sql` property and the SQL to Python
mapping of the database adapter. For more information see:
https://www.sqlite.org/datatype3.html
https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types
Flexible fields have the type affinity `TEXT`. This means the
        `sql_value` is either a `buffer`/`memoryview` or a `unicode` object
        and the method must handle both cases.
"""
if isinstance(sql_value, buffer):
sql_value = bytes(sql_value).decode('utf-8', 'ignore')
if isinstance(sql_value, six.text_type):
return self.parse(sql_value)
else:
return self.normalize(sql_value)
def to_sql(self, model_value):
"""Convert a value as stored in the model object to a value used
by the database adapter.
"""
return model_value
# Reusable types.
class Default(Type):
null = None
class Integer(Type):
"""A basic integer type.
"""
sql = u'INTEGER'
query = query.NumericQuery
model_type = int
class PaddedInt(Integer):
"""An integer field that is formatted with a given number of digits,
padded with zeroes.
"""
def __init__(self, digits):
self.digits = digits
def format(self, value):
return u'{0:0{1}d}'.format(value or 0, self.digits)
class NullPaddedInt(PaddedInt):
"""Same as `PaddedInt`, but does not normalize `None` to `0.0`.
"""
null = None
class ScaledInt(Integer):
"""An integer whose formatting operation scales the number by a
constant and adds a suffix. Good for units with large magnitudes.
"""
def __init__(self, unit, suffix=u''):
self.unit = unit
self.suffix = suffix
def format(self, value):
return u'{0}{1}'.format((value or 0) // self.unit, self.suffix)
class Id(Integer):
"""An integer used as the row id or a foreign key in a SQLite table.
This type is nullable: None values are not translated to zero.
"""
null = None
def __init__(self, primary=True):
if primary:
self.sql = u'INTEGER PRIMARY KEY'
class Float(Type):
"""A basic floating-point type. The `digits` parameter specifies how
many decimal places to use in the human-readable representation.
"""
sql = u'REAL'
query = query.NumericQuery
model_type = float
def __init__(self, digits=1):
self.digits = digits
def format(self, value):
return u'{0:.{1}f}'.format(value or 0, self.digits)
class NullFloat(Float):
"""Same as `Float`, but does not normalize `None` to `0.0`.
"""
null = None
class String(Type):
"""A Unicode string type.
"""
sql = u'TEXT'
query = query.SubstringQuery
class Boolean(Type):
"""A boolean type.
"""
sql = u'INTEGER'
query = query.BooleanQuery
model_type = bool
def format(self, value):
return six.text_type(bool(value))
def parse(self, string):
return str2bool(string)
# Shared instances of common types.
DEFAULT = Default()
INTEGER = Integer()
PRIMARY_ID = Id(True)
FOREIGN_ID = Id(False)
FLOAT = Float()
NULL_FLOAT = NullFloat()
STRING = String()
BOOLEAN = Boolean()
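# Illustrative behaviour of the types above (a sketch for the reader, not part
# of the beets test suite):
#
#   PaddedInt(4).format(7)                 -> u'0007'
#   ScaledInt(1024, u'KiB').format(3072)   -> u'3KiB'
#   BOOLEAN.parse(u'yes')                  -> True
#   FLOAT.normalize(None)                  -> 0.0
#   NULL_FLOAT.normalize(None)             -> None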
|
|
# Python stubs generated by omniidl from /usr/local/share/idl/omniORB/COS/CosLifeCycle.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
# #include "CosNaming.idl"
import CosNaming_idl
_0_CosNaming = omniORB.openModule("CosNaming")
_0_CosNaming__POA = omniORB.openModule("CosNaming__POA")
#
# Start of module "CosLifeCycle"
#
__name__ = "CosLifeCycle"
_0_CosLifeCycle = omniORB.openModule("CosLifeCycle", r"/usr/local/share/idl/omniORB/COS/CosLifeCycle.idl")
_0_CosLifeCycle__POA = omniORB.openModule("CosLifeCycle__POA", r"/usr/local/share/idl/omniORB/COS/CosLifeCycle.idl")
# typedef ... Key
class Key:
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/Key:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosLifeCycle.Key = Key
_0_CosLifeCycle._d_Key = omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"]
_0_CosLifeCycle._ad_Key = (omniORB.tcInternal.tv_alias, Key._NP_RepositoryId, "Key", omniORB.typeCodeMapping["IDL:omg.org/CosNaming/Name:1.0"]._d)
_0_CosLifeCycle._tc_Key = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._ad_Key)
omniORB.registerType(Key._NP_RepositoryId, _0_CosLifeCycle._ad_Key, _0_CosLifeCycle._tc_Key)
del Key
# typedef ... Factory
class Factory:
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/Factory:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosLifeCycle.Factory = Factory
_0_CosLifeCycle._d_Factory = omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"]
_0_CosLifeCycle._ad_Factory = (omniORB.tcInternal.tv_alias, Factory._NP_RepositoryId, "Factory", omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"])
_0_CosLifeCycle._tc_Factory = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._ad_Factory)
omniORB.registerType(Factory._NP_RepositoryId, _0_CosLifeCycle._ad_Factory, _0_CosLifeCycle._tc_Factory)
del Factory
# typedef ... Factories
class Factories:
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/Factories:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosLifeCycle.Factories = Factories
_0_CosLifeCycle._d_Factories = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Factory:1.0"], 0)
_0_CosLifeCycle._ad_Factories = (omniORB.tcInternal.tv_alias, Factories._NP_RepositoryId, "Factories", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Factory:1.0"], 0))
_0_CosLifeCycle._tc_Factories = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._ad_Factories)
omniORB.registerType(Factories._NP_RepositoryId, _0_CosLifeCycle._ad_Factories, _0_CosLifeCycle._tc_Factories)
del Factories
# struct NVP
_0_CosLifeCycle.NVP = omniORB.newEmptyClass()
class NVP (omniORB.StructBase):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/NVP:1.0"
def __init__(self, name, value):
self.name = name
self.value = value
_0_CosLifeCycle.NVP = NVP
_0_CosLifeCycle._d_NVP = (omniORB.tcInternal.tv_struct, NVP, NVP._NP_RepositoryId, "NVP", "name", omniORB.typeMapping["IDL:omg.org/CosNaming/Istring:1.0"], "value", omniORB.tcInternal.tv_any)
_0_CosLifeCycle._tc_NVP = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_NVP)
omniORB.registerType(NVP._NP_RepositoryId, _0_CosLifeCycle._d_NVP, _0_CosLifeCycle._tc_NVP)
del NVP
# typedef ... NameValuePair
class NameValuePair (_0_CosLifeCycle.NVP):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/NameValuePair:1.0"
_0_CosLifeCycle.NameValuePair = NameValuePair
_0_CosLifeCycle._d_NameValuePair = omniORB.typeMapping["IDL:omg.org/CosLifeCycle/NVP:1.0"]
_0_CosLifeCycle._ad_NameValuePair = (omniORB.tcInternal.tv_alias, NameValuePair._NP_RepositoryId, "NameValuePair", omniORB.typeMapping["IDL:omg.org/CosLifeCycle/NVP:1.0"])
_0_CosLifeCycle._tc_NameValuePair = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._ad_NameValuePair)
omniORB.registerType(NameValuePair._NP_RepositoryId, _0_CosLifeCycle._ad_NameValuePair, _0_CosLifeCycle._tc_NameValuePair)
del NameValuePair
# typedef ... Criteria
class Criteria:
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/Criteria:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosLifeCycle.Criteria = Criteria
_0_CosLifeCycle._d_Criteria = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosLifeCycle/NameValuePair:1.0"], 0)
_0_CosLifeCycle._ad_Criteria = (omniORB.tcInternal.tv_alias, Criteria._NP_RepositoryId, "Criteria", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosLifeCycle/NameValuePair:1.0"], 0))
_0_CosLifeCycle._tc_Criteria = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._ad_Criteria)
omniORB.registerType(Criteria._NP_RepositoryId, _0_CosLifeCycle._ad_Criteria, _0_CosLifeCycle._tc_Criteria)
del Criteria
# exception NoFactory
_0_CosLifeCycle.NoFactory = omniORB.newEmptyClass()
class NoFactory (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/NoFactory:1.0"
def __init__(self, search_key):
CORBA.UserException.__init__(self, search_key)
self.search_key = search_key
_0_CosLifeCycle.NoFactory = NoFactory
_0_CosLifeCycle._d_NoFactory = (omniORB.tcInternal.tv_except, NoFactory, NoFactory._NP_RepositoryId, "NoFactory", "search_key", omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Key:1.0"])
_0_CosLifeCycle._tc_NoFactory = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_NoFactory)
omniORB.registerType(NoFactory._NP_RepositoryId, _0_CosLifeCycle._d_NoFactory, _0_CosLifeCycle._tc_NoFactory)
del NoFactory
# exception NotCopyable
_0_CosLifeCycle.NotCopyable = omniORB.newEmptyClass()
class NotCopyable (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/NotCopyable:1.0"
def __init__(self, reason):
CORBA.UserException.__init__(self, reason)
self.reason = reason
_0_CosLifeCycle.NotCopyable = NotCopyable
_0_CosLifeCycle._d_NotCopyable = (omniORB.tcInternal.tv_except, NotCopyable, NotCopyable._NP_RepositoryId, "NotCopyable", "reason", (omniORB.tcInternal.tv_string,0))
_0_CosLifeCycle._tc_NotCopyable = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_NotCopyable)
omniORB.registerType(NotCopyable._NP_RepositoryId, _0_CosLifeCycle._d_NotCopyable, _0_CosLifeCycle._tc_NotCopyable)
del NotCopyable
# exception NotMovable
_0_CosLifeCycle.NotMovable = omniORB.newEmptyClass()
class NotMovable (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/NotMovable:1.0"
def __init__(self, reason):
CORBA.UserException.__init__(self, reason)
self.reason = reason
_0_CosLifeCycle.NotMovable = NotMovable
_0_CosLifeCycle._d_NotMovable = (omniORB.tcInternal.tv_except, NotMovable, NotMovable._NP_RepositoryId, "NotMovable", "reason", (omniORB.tcInternal.tv_string,0))
_0_CosLifeCycle._tc_NotMovable = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_NotMovable)
omniORB.registerType(NotMovable._NP_RepositoryId, _0_CosLifeCycle._d_NotMovable, _0_CosLifeCycle._tc_NotMovable)
del NotMovable
# exception NotRemovable
_0_CosLifeCycle.NotRemovable = omniORB.newEmptyClass()
class NotRemovable (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/NotRemovable:1.0"
def __init__(self, reason):
CORBA.UserException.__init__(self, reason)
self.reason = reason
_0_CosLifeCycle.NotRemovable = NotRemovable
_0_CosLifeCycle._d_NotRemovable = (omniORB.tcInternal.tv_except, NotRemovable, NotRemovable._NP_RepositoryId, "NotRemovable", "reason", (omniORB.tcInternal.tv_string,0))
_0_CosLifeCycle._tc_NotRemovable = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_NotRemovable)
omniORB.registerType(NotRemovable._NP_RepositoryId, _0_CosLifeCycle._d_NotRemovable, _0_CosLifeCycle._tc_NotRemovable)
del NotRemovable
# exception InvalidCriteria
_0_CosLifeCycle.InvalidCriteria = omniORB.newEmptyClass()
class InvalidCriteria (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/InvalidCriteria:1.0"
def __init__(self, invalid_criteria):
CORBA.UserException.__init__(self, invalid_criteria)
self.invalid_criteria = invalid_criteria
_0_CosLifeCycle.InvalidCriteria = InvalidCriteria
_0_CosLifeCycle._d_InvalidCriteria = (omniORB.tcInternal.tv_except, InvalidCriteria, InvalidCriteria._NP_RepositoryId, "InvalidCriteria", "invalid_criteria", omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Criteria:1.0"])
_0_CosLifeCycle._tc_InvalidCriteria = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_InvalidCriteria)
omniORB.registerType(InvalidCriteria._NP_RepositoryId, _0_CosLifeCycle._d_InvalidCriteria, _0_CosLifeCycle._tc_InvalidCriteria)
del InvalidCriteria
# exception CannotMeetCriteria
_0_CosLifeCycle.CannotMeetCriteria = omniORB.newEmptyClass()
class CannotMeetCriteria (CORBA.UserException):
_NP_RepositoryId = "IDL:omg.org/CosLifeCycle/CannotMeetCriteria:1.0"
def __init__(self, unmet_criteria):
CORBA.UserException.__init__(self, unmet_criteria)
self.unmet_criteria = unmet_criteria
_0_CosLifeCycle.CannotMeetCriteria = CannotMeetCriteria
_0_CosLifeCycle._d_CannotMeetCriteria = (omniORB.tcInternal.tv_except, CannotMeetCriteria, CannotMeetCriteria._NP_RepositoryId, "CannotMeetCriteria", "unmet_criteria", omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Criteria:1.0"])
_0_CosLifeCycle._tc_CannotMeetCriteria = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_CannotMeetCriteria)
omniORB.registerType(CannotMeetCriteria._NP_RepositoryId, _0_CosLifeCycle._d_CannotMeetCriteria, _0_CosLifeCycle._tc_CannotMeetCriteria)
del CannotMeetCriteria
# interface FactoryFinder
_0_CosLifeCycle._d_FactoryFinder = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosLifeCycle/FactoryFinder:1.0", "FactoryFinder")
omniORB.typeMapping["IDL:omg.org/CosLifeCycle/FactoryFinder:1.0"] = _0_CosLifeCycle._d_FactoryFinder
_0_CosLifeCycle.FactoryFinder = omniORB.newEmptyClass()
class FactoryFinder :
_NP_RepositoryId = _0_CosLifeCycle._d_FactoryFinder[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosLifeCycle.FactoryFinder = FactoryFinder
_0_CosLifeCycle._tc_FactoryFinder = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_FactoryFinder)
omniORB.registerType(FactoryFinder._NP_RepositoryId, _0_CosLifeCycle._d_FactoryFinder, _0_CosLifeCycle._tc_FactoryFinder)
# FactoryFinder operations and attributes
FactoryFinder._d_find_factories = ((omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Key:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Factories:1.0"], ), {_0_CosLifeCycle.NoFactory._NP_RepositoryId: _0_CosLifeCycle._d_NoFactory})
# FactoryFinder object reference
class _objref_FactoryFinder (CORBA.Object):
_NP_RepositoryId = FactoryFinder._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def find_factories(self, *args):
return self._obj.invoke("find_factories", _0_CosLifeCycle.FactoryFinder._d_find_factories, args)
omniORB.registerObjref(FactoryFinder._NP_RepositoryId, _objref_FactoryFinder)
_0_CosLifeCycle._objref_FactoryFinder = _objref_FactoryFinder
del FactoryFinder, _objref_FactoryFinder
# FactoryFinder skeleton
__name__ = "CosLifeCycle__POA"
class FactoryFinder (PortableServer.Servant):
_NP_RepositoryId = _0_CosLifeCycle.FactoryFinder._NP_RepositoryId
_omni_op_d = {"find_factories": _0_CosLifeCycle.FactoryFinder._d_find_factories}
FactoryFinder._omni_skeleton = FactoryFinder
_0_CosLifeCycle__POA.FactoryFinder = FactoryFinder
omniORB.registerSkeleton(FactoryFinder._NP_RepositoryId, FactoryFinder)
del FactoryFinder
__name__ = "CosLifeCycle"
# interface LifeCycleObject
_0_CosLifeCycle._d_LifeCycleObject = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosLifeCycle/LifeCycleObject:1.0", "LifeCycleObject")
omniORB.typeMapping["IDL:omg.org/CosLifeCycle/LifeCycleObject:1.0"] = _0_CosLifeCycle._d_LifeCycleObject
_0_CosLifeCycle.LifeCycleObject = omniORB.newEmptyClass()
class LifeCycleObject :
_NP_RepositoryId = _0_CosLifeCycle._d_LifeCycleObject[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosLifeCycle.LifeCycleObject = LifeCycleObject
_0_CosLifeCycle._tc_LifeCycleObject = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_LifeCycleObject)
omniORB.registerType(LifeCycleObject._NP_RepositoryId, _0_CosLifeCycle._d_LifeCycleObject, _0_CosLifeCycle._tc_LifeCycleObject)
# LifeCycleObject operations and attributes
LifeCycleObject._d_copy = ((omniORB.typeMapping["IDL:omg.org/CosLifeCycle/FactoryFinder:1.0"], omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Criteria:1.0"]), (omniORB.typeMapping["IDL:omg.org/CosLifeCycle/LifeCycleObject:1.0"], ), {_0_CosLifeCycle.NoFactory._NP_RepositoryId: _0_CosLifeCycle._d_NoFactory, _0_CosLifeCycle.NotCopyable._NP_RepositoryId: _0_CosLifeCycle._d_NotCopyable, _0_CosLifeCycle.InvalidCriteria._NP_RepositoryId: _0_CosLifeCycle._d_InvalidCriteria, _0_CosLifeCycle.CannotMeetCriteria._NP_RepositoryId: _0_CosLifeCycle._d_CannotMeetCriteria})
LifeCycleObject._d_move = ((omniORB.typeMapping["IDL:omg.org/CosLifeCycle/FactoryFinder:1.0"], omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Criteria:1.0"]), (), {_0_CosLifeCycle.NoFactory._NP_RepositoryId: _0_CosLifeCycle._d_NoFactory, _0_CosLifeCycle.NotMovable._NP_RepositoryId: _0_CosLifeCycle._d_NotMovable, _0_CosLifeCycle.InvalidCriteria._NP_RepositoryId: _0_CosLifeCycle._d_InvalidCriteria, _0_CosLifeCycle.CannotMeetCriteria._NP_RepositoryId: _0_CosLifeCycle._d_CannotMeetCriteria})
LifeCycleObject._d_remove = ((), (), {_0_CosLifeCycle.NotRemovable._NP_RepositoryId: _0_CosLifeCycle._d_NotRemovable})
# LifeCycleObject object reference
class _objref_LifeCycleObject (CORBA.Object):
_NP_RepositoryId = LifeCycleObject._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def copy(self, *args):
return self._obj.invoke("copy", _0_CosLifeCycle.LifeCycleObject._d_copy, args)
def move(self, *args):
return self._obj.invoke("move", _0_CosLifeCycle.LifeCycleObject._d_move, args)
def remove(self, *args):
return self._obj.invoke("remove", _0_CosLifeCycle.LifeCycleObject._d_remove, args)
omniORB.registerObjref(LifeCycleObject._NP_RepositoryId, _objref_LifeCycleObject)
_0_CosLifeCycle._objref_LifeCycleObject = _objref_LifeCycleObject
del LifeCycleObject, _objref_LifeCycleObject
# LifeCycleObject skeleton
__name__ = "CosLifeCycle__POA"
class LifeCycleObject (PortableServer.Servant):
_NP_RepositoryId = _0_CosLifeCycle.LifeCycleObject._NP_RepositoryId
_omni_op_d = {"copy": _0_CosLifeCycle.LifeCycleObject._d_copy, "move": _0_CosLifeCycle.LifeCycleObject._d_move, "remove": _0_CosLifeCycle.LifeCycleObject._d_remove}
LifeCycleObject._omni_skeleton = LifeCycleObject
_0_CosLifeCycle__POA.LifeCycleObject = LifeCycleObject
omniORB.registerSkeleton(LifeCycleObject._NP_RepositoryId, LifeCycleObject)
del LifeCycleObject
__name__ = "CosLifeCycle"
# interface GenericFactory
_0_CosLifeCycle._d_GenericFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosLifeCycle/GenericFactory:1.0", "GenericFactory")
omniORB.typeMapping["IDL:omg.org/CosLifeCycle/GenericFactory:1.0"] = _0_CosLifeCycle._d_GenericFactory
_0_CosLifeCycle.GenericFactory = omniORB.newEmptyClass()
class GenericFactory :
_NP_RepositoryId = _0_CosLifeCycle._d_GenericFactory[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosLifeCycle.GenericFactory = GenericFactory
_0_CosLifeCycle._tc_GenericFactory = omniORB.tcInternal.createTypeCode(_0_CosLifeCycle._d_GenericFactory)
omniORB.registerType(GenericFactory._NP_RepositoryId, _0_CosLifeCycle._d_GenericFactory, _0_CosLifeCycle._tc_GenericFactory)
# GenericFactory operations and attributes
GenericFactory._d_supports = ((omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Key:1.0"], ), (omniORB.tcInternal.tv_boolean, ), None)
GenericFactory._d_create_object = ((omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Key:1.0"], omniORB.typeMapping["IDL:omg.org/CosLifeCycle/Criteria:1.0"]), (omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"], ), {_0_CosLifeCycle.NoFactory._NP_RepositoryId: _0_CosLifeCycle._d_NoFactory, _0_CosLifeCycle.InvalidCriteria._NP_RepositoryId: _0_CosLifeCycle._d_InvalidCriteria, _0_CosLifeCycle.CannotMeetCriteria._NP_RepositoryId: _0_CosLifeCycle._d_CannotMeetCriteria})
# GenericFactory object reference
class _objref_GenericFactory (CORBA.Object):
_NP_RepositoryId = GenericFactory._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def supports(self, *args):
return self._obj.invoke("supports", _0_CosLifeCycle.GenericFactory._d_supports, args)
def create_object(self, *args):
return self._obj.invoke("create_object", _0_CosLifeCycle.GenericFactory._d_create_object, args)
omniORB.registerObjref(GenericFactory._NP_RepositoryId, _objref_GenericFactory)
_0_CosLifeCycle._objref_GenericFactory = _objref_GenericFactory
del GenericFactory, _objref_GenericFactory
# GenericFactory skeleton
__name__ = "CosLifeCycle__POA"
class GenericFactory (PortableServer.Servant):
_NP_RepositoryId = _0_CosLifeCycle.GenericFactory._NP_RepositoryId
_omni_op_d = {"supports": _0_CosLifeCycle.GenericFactory._d_supports, "create_object": _0_CosLifeCycle.GenericFactory._d_create_object}
GenericFactory._omni_skeleton = GenericFactory
_0_CosLifeCycle__POA.GenericFactory = GenericFactory
omniORB.registerSkeleton(GenericFactory._NP_RepositoryId, GenericFactory)
del GenericFactory
__name__ = "CosLifeCycle"
#
# End of module "CosLifeCycle"
#
__name__ = "CosLifeCycle_idl"
_exported_modules = ( "CosLifeCycle", )
# The end.
|
|
import glob
import os
import pickle
import shutil
import sys
import tempfile
import time
import unittest
from unittest.mock import patch
import yaml
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune.integration.docker import DockerSyncer
from ray.tune.integration.kubernetes import KubernetesSyncer
from ray.tune.syncer import CommandBasedClient, detect_sync_to_driver
class TestSyncFunctionality(unittest.TestCase):
def setUp(self):
# Wait up to 1.5 seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "1.5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
ray.init(num_cpus=2)
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
@patch("ray.tune.sync_client.S3_PREFIX", "test")
def testNoUploadDir(self):
"""No Upload Dir is given."""
with self.assertRaises(AssertionError):
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=tune.SyncConfig(
**{"sync_to_cloud": "echo {source} {target}"})).trials
@patch("ray.tune.sync_client.S3_PREFIX", "test")
def testCloudProperString(self):
with self.assertRaises(ValueError):
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=tune.SyncConfig(**{
"upload_dir": "test",
"sync_to_cloud": "ls {target}"
})).trials
with self.assertRaises(ValueError):
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=tune.SyncConfig(**{
"upload_dir": "test",
"sync_to_cloud": "ls {source}"
})).trials
tmpdir = tempfile.mkdtemp()
logfile = os.path.join(tmpdir, "test.log")
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=tune.SyncConfig(
**{
"upload_dir": "test",
"sync_to_cloud": "echo {source} {target} > " + logfile
})).trials
with open(logfile) as f:
lines = f.read()
self.assertTrue("test" in lines)
shutil.rmtree(tmpdir)
def testClusterProperString(self):
"""Tests that invalid commands throw.."""
with self.assertRaises(ValueError):
            # This raises ValueError because the logger is initialized in the safe zone.
sync_config = tune.SyncConfig(sync_to_driver="ls {target}")
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=sync_config,
).trials
with self.assertRaises(ValueError):
            # This raises ValueError because the logger is initialized in the safe zone.
sync_config = tune.SyncConfig(sync_to_driver="ls {source}")
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
sync_config=sync_config,
stop={
"training_iteration": 1
}).trials
with patch.object(CommandBasedClient, "_execute") as mock_fn:
with patch("ray.util.get_node_ip_address") as mock_sync:
sync_config = tune.SyncConfig(
sync_to_driver="echo {source} {target}")
mock_sync.return_value = "0.0.0.0"
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
sync_config=sync_config,
stop={
"training_iteration": 1
}).trials
self.assertGreater(mock_fn.call_count, 0)
def testCloudFunctions(self):
tmpdir = tempfile.mkdtemp()
tmpdir2 = tempfile.mkdtemp()
os.mkdir(os.path.join(tmpdir2, "foo"))
def sync_func(local, remote):
for filename in glob.glob(os.path.join(local, "*.json")):
shutil.copy(filename, remote)
sync_config = tune.SyncConfig(
upload_dir=tmpdir2, sync_to_cloud=sync_func)
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
local_dir=tmpdir,
stop={
"training_iteration": 1
},
sync_config=sync_config).trials
test_file_path = glob.glob(os.path.join(tmpdir2, "foo", "*.json"))
self.assertTrue(test_file_path)
shutil.rmtree(tmpdir)
shutil.rmtree(tmpdir2)
@patch("ray.tune.sync_client.S3_PREFIX", "test")
def testCloudSyncPeriod(self):
"""Tests that changing CLOUD_SYNC_PERIOD affects syncing frequency."""
tmpdir = tempfile.mkdtemp()
def trainable(config):
for i in range(10):
time.sleep(1)
tune.report(score=i)
def counter(local, remote):
count_file = os.path.join(tmpdir, "count.txt")
if not os.path.exists(count_file):
count = 0
else:
with open(count_file, "rb") as fp:
count = pickle.load(fp)
count += 1
with open(count_file, "wb") as fp:
pickle.dump(count, fp)
sync_config = tune.SyncConfig(
upload_dir="test", sync_to_cloud=counter, cloud_sync_period=1)
# This was originally set to 0.5
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0"
self.addCleanup(
lambda: os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S", None))
[trial] = tune.run(
trainable,
name="foo",
max_failures=0,
local_dir=tmpdir,
stop={
"training_iteration": 10
},
sync_config=sync_config,
).trials
count_file = os.path.join(tmpdir, "count.txt")
with open(count_file, "rb") as fp:
count = pickle.load(fp)
self.assertEqual(count, 12)
shutil.rmtree(tmpdir)
def testClusterSyncFunction(self):
def sync_func_driver(source, target):
assert ":" in source, "Source {} not a remote path.".format(source)
assert ":" not in target, "Target is supposed to be local."
with open(os.path.join(target, "test.log2"), "w") as f:
print("writing to", f.name)
f.write(source)
sync_config = tune.SyncConfig(
sync_to_driver=sync_func_driver, node_sync_period=5)
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=sync_config).trials
test_file_path = os.path.join(trial.logdir, "test.log2")
self.assertFalse(os.path.exists(test_file_path))
with patch("ray.util.get_node_ip_address") as mock_sync:
mock_sync.return_value = "0.0.0.0"
sync_config = tune.SyncConfig(sync_to_driver=sync_func_driver)
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=sync_config).trials
test_file_path = os.path.join(trial.logdir, "test.log2")
self.assertTrue(os.path.exists(test_file_path))
os.remove(test_file_path)
def testNoSync(self):
"""Sync should not run on a single node."""
def sync_func(source, target):
pass
sync_config = tune.SyncConfig(sync_to_driver=sync_func)
with patch.object(CommandBasedClient, "_execute") as mock_sync:
[trial] = tune.run(
"__fake",
name="foo",
max_failures=0,
stop={
"training_iteration": 1
},
sync_config=sync_config).trials
self.assertEqual(mock_sync.call_count, 0)
def testSyncDetection(self):
kubernetes_conf = {
"provider": {
"type": "kubernetes",
"namespace": "test_ray"
}
}
docker_conf = {
"docker": {
"image": "bogus"
},
"provider": {
"type": "aws"
}
}
aws_conf = {"provider": {"type": "aws"}}
with tempfile.TemporaryDirectory() as dir:
kubernetes_file = os.path.join(dir, "kubernetes.yaml")
with open(kubernetes_file, "wt") as fp:
yaml.safe_dump(kubernetes_conf, fp)
docker_file = os.path.join(dir, "docker.yaml")
with open(docker_file, "wt") as fp:
yaml.safe_dump(docker_conf, fp)
aws_file = os.path.join(dir, "aws.yaml")
with open(aws_file, "wt") as fp:
yaml.safe_dump(aws_conf, fp)
kubernetes_syncer = detect_sync_to_driver(None, kubernetes_file)
self.assertTrue(issubclass(kubernetes_syncer, KubernetesSyncer))
self.assertEqual(kubernetes_syncer._namespace, "test_ray")
docker_syncer = detect_sync_to_driver(None, docker_file)
self.assertTrue(issubclass(docker_syncer, DockerSyncer))
aws_syncer = detect_sync_to_driver(None, aws_file)
self.assertEqual(aws_syncer, None)
# Should still return DockerSyncer, since it was passed explicitly
syncer = detect_sync_to_driver(DockerSyncer, kubernetes_file)
self.assertTrue(issubclass(syncer, DockerSyncer))
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Callable, Generic, Optional, Type, Union, TYPE_CHECKING
import warnings
from pyspark.pandas._typing import T
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
class CachedAccessor(Generic[T]):
"""
Custom property-like object.
A descriptor for caching accessors:
Parameters
----------
name : str
Namespace that accessor's methods, properties, etc will be accessed under, e.g. "foo" for a
dataframe accessor yields the accessor ``df.foo``
accessor: cls
Class with the extension methods.
Notes
-----
For accessor, the class's __init__ method assumes that you are registering an accessor for one
of ``Series``, ``DataFrame``, or ``Index``.
This object is not meant to be instantiated directly. Instead, use register_dataframe_accessor,
register_series_accessor, or register_index_accessor.
The pandas-on-Spark accessor is modified based on pandas.core.accessor.
"""
def __init__(self, name: str, accessor: Type[T]) -> None:
self._name = name
self._accessor = accessor
def __get__(
self, obj: Optional[Union["DataFrame", "Series", "Index"]], cls: Type[T]
) -> Union[T, Type[T]]:
if obj is None:
return self._accessor
accessor_obj = self._accessor(obj) # type: ignore
object.__setattr__(obj, self._name, accessor_obj)
return accessor_obj
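# A minimal, self-contained sketch (illustrative only; the names below are not part of
# the pyspark API) of how CachedAccessor caches its result: the first access builds the
# accessor and stores it on the instance via object.__setattr__, shadowing this
# non-data descriptor, so later accesses return the same object. The helper is never
# called on import.
def _cached_accessor_demo() -> None:
    class _Greeter:
        def __init__(self, obj) -> None:
            self._obj = obj
        def hello(self) -> str:
            return "hello from " + type(self._obj).__name__
    class _Host:
        greet = CachedAccessor("greet", _Greeter)
    host = _Host()
    first = host.greet  # built by CachedAccessor.__get__ and cached on the instance
    second = host.greet  # served from the cached instance attribute
    assert first is second
    assert first.hello() == "hello from _Host"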
def _register_accessor(
name: str, cls: Union[Type["DataFrame"], Type["Series"], Type["Index"]]
) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor on {klass} objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued if this name
conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor: Register a custom accessor on DataFrame objects
register_series_accessor: Register a custom accessor on Series objects
register_index_accessor: Register a custom accessor on Index objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user
is interacting with. The code signature must be:
.. code-block:: python
def __init__(self, pandas_on_spark_obj):
# constructor logic
...
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Note: This function is not meant to be used directly - instead, use register_dataframe_accessor,
register_series_accessor, or register_index_accessor.
"""
def decorator(accessor: Type[T]) -> Type[T]:
if hasattr(cls, name):
msg = (
"registration of accessor {0} under name '{1}' for type {2} is overriding "
"a preexisting attribute with the same name.".format(accessor, name, cls.__name__)
)
warnings.warn(
msg,
UserWarning,
stacklevel=2,
)
setattr(cls, name, CachedAccessor(name, accessor))
return accessor
return decorator
def register_dataframe_accessor(name: str) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor with a DataFrame
Parameters
----------
name : str
        Name used when calling the accessor after it is registered.
Returns
-------
callable
A class decorator.
See Also
--------
register_series_accessor: Register a custom accessor on Series objects
register_index_accessor: Register a custom accessor on Index objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user
is interacting with. The accessor's init method should always ingest the object being accessed.
See the examples for the init signature.
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Examples
--------
In your library code::
from pyspark.pandas.extensions import register_dataframe_accessor
@register_dataframe_accessor("geo")
class GeoAccessor:
def __init__(self, pandas_on_spark_obj):
self._obj = pandas_on_spark_obj
# other constructor logic
@property
def center(self):
# return the geographic center point of this DataFrame
lat = self._obj.latitude
lon = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def plot(self):
# plot this array's data on a map
pass
Then, in an ipython session::
>>> ## Import if the accessor is in the other file.
>>> # from my_ext_lib import GeoAccessor
>>> psdf = ps.DataFrame({"longitude": np.linspace(0,10),
... "latitude": np.linspace(0, 20)})
>>> psdf.geo.center # doctest: +SKIP
(5.0, 10.0)
>>> psdf.geo.plot() # doctest: +SKIP
"""
from pyspark.pandas import DataFrame
return _register_accessor(name, DataFrame)
def register_series_accessor(name: str) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor with a Series object
Parameters
----------
name : str
        Name used when calling the accessor after it is registered.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor: Register a custom accessor on DataFrame objects
register_index_accessor: Register a custom accessor on Index objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user is
interacting with. The code signature must be::
def __init__(self, pandas_on_spark_obj):
# constructor logic
...
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Examples
--------
In your library code::
from pyspark.pandas.extensions import register_series_accessor
@register_series_accessor("geo")
class GeoAccessor:
def __init__(self, pandas_on_spark_obj):
self._obj = pandas_on_spark_obj
@property
def is_valid(self):
# boolean check to see if series contains valid geometry
return True
Then, in an ipython session::
>>> ## Import if the accessor is in the other file.
>>> # from my_ext_lib import GeoAccessor
>>> psdf = ps.DataFrame({"longitude": np.linspace(0,10),
... "latitude": np.linspace(0, 20)})
>>> psdf.longitude.geo.is_valid # doctest: +SKIP
True
"""
from pyspark.pandas import Series
return _register_accessor(name, Series)
def register_index_accessor(name: str) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor with an Index
Parameters
----------
name : str
        Name used when calling the accessor after it is registered.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor: Register a custom accessor on DataFrame objects
register_series_accessor: Register a custom accessor on Series objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user is
interacting with. The code signature must be::
def __init__(self, pandas_on_spark_obj):
# constructor logic
...
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Examples
--------
In your library code::
from pyspark.pandas.extensions import register_index_accessor
@register_index_accessor("foo")
class CustomAccessor:
def __init__(self, pandas_on_spark_obj):
self._obj = pandas_on_spark_obj
self.item = "baz"
@property
def bar(self):
# return item value
return self.item
Then, in an ipython session::
>>> ## Import if the accessor is in the other file.
>>> # from my_ext_lib import CustomAccessor
>>> psdf = ps.DataFrame({"longitude": np.linspace(0,10),
... "latitude": np.linspace(0, 20)})
>>> psdf.index.foo.bar # doctest: +SKIP
'baz'
"""
from pyspark.pandas import Index
return _register_accessor(name, Index)
def _test() -> None:
import os
import doctest
import sys
import numpy
from pyspark.sql import SparkSession
import pyspark.pandas.extensions
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.extensions.__dict__.copy()
globs["np"] = numpy
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.extensions tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.extensions,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 18 17:48:03 2017
@author: Masao Takakuwa
"""
import numpy as np
from common.functions import sigmoid
class Relu(object) :
def __init__(self):
self.mask = None
def forward(self, x):
self.mask = (x <= 0)
out = x.copy()
out[self.mask] = 0
return out
def backward(self, dout):
dout[self.mask] = 0
dx = dout
return dx
class Sigmoid(object):
def __init__(self):
self.out = None
def forward(self, x):
out = sigmoid(x)
self.out = out
return out
def backward(self, dout):
dx = dout * (1.0 - self.out) * self.out
return dx
class Affine(object):
def __init__(self,W,b,batch_size=0):
#print("Affine")
self.x = None # N*50
self.dW = None #50,10
self.db = None #10
self.W = W # 50,10 (in,out)
        self.b = b # 10 (out)
def forward(self,x):
self.x = x
out = np.dot(x,self.W)+self.b
return out
def backward(self,dout): #dout:N,10
dx = np.dot(dout,self.W.T)
self.dW = np.dot(self.x.T,dout)
self.db = np.sum(dout,axis=0)
return dx
###---------------------------------------------------------
class PathIndex(object) :
def __init__(self,size) :
#print("PathIndex size:",size)
#self.idx = 0
self.i = 0
self.pre_i = 0
self.size = size
#self.list_size = size*11+1
#self.rand_list = np.random.randint(0,size,self.list_size)
def update(self,pre_i):
self.pre_i = pre_i
if self.size != 0:
self.i = np.random.randint(0,self.size)
#self.idx = (self.idx+1) % self.list_size
#self.i = self.rand_list[self.idx]
return self.i
class DPLRelu(Relu,PathIndex) :
def __init__(self):
PathIndex.__init__(self,0)
self.mask = None
def update(self,pre_i) :
self.i = pre_i
self.pre_i = pre_i
return self.i
def DPLforward(self,x):
#print("DPLRelu-forward x:",x)
self.mask = (x <= 0)
out = x.copy()
out[self.mask] = 0
#print("DPLRelu-DPForward out:",out,"mask",self.mask,"i:",self.i)
return out
def DPLbackward(self, dout):
#print("DPLRelu-backward dout:",dout,"mask",self.mask,"i:",self.i)
dout[self.mask] = 0
dx = dout
return dx
class DPLSigmoid(Sigmoid,PathIndex):
def __init__(self):
        PathIndex.__init__(self,0)
        self.out = None
def update(self,pre_i) :
self.i = pre_i
self.pre_i = pre_i
return self.i
def DPLforward(self, x):
out = sigmoid(x)
self.out = out
return out
def DPLbackward(self, dout):
dx = dout * (1.0 - self.out) * self.out
return dx
class Through(Sigmoid,PathIndex):
def __init__(self):
        PathIndex.__init__(self,0)
        self.out = None
def update(self,pre_i) :
self.i = pre_i
self.pre_i = pre_i
return self.i
def DPLforward(self, x): return x
def DPLbackward(self, dout): return dout
class FirstPath(Affine,PathIndex):
def _init(self,w,b,batch_size) :
Affine.__init__(self,w,b,batch_size)
PathIndex.__init__(self,b.shape[0])
# w: 784,50 (in,out) b:50 (out) x: N*784
self.dW = np.zeros_like(w) #784,50
self.db = np.zeros_like(b) #50
def __init__(self,w,b,batch_size=0):
self._init(w,b,batch_size)
self.dout = np.zeros((batch_size,b.shape[0]))
def DPLforward(self,x):
#print("FirstPath-DPLforward x:",x.shape,"W:",self.W.shape,"b:",self.b.shape)
self.x = x
W = self.W[:,self.i:self.i+1]
out = np.dot(x,W)+self.b[self.i]
#out = x*self.W[self.pre_i][self.i]+self.b[self.i]
#print("FirstPath-DPLforward out:",out,"W:",self.W.shape,"b:",self.b.shape)
return out
def DPLbackward(self,dout): #dout:N*1
#print("FirstPath-DPLbackward dout:",dout.shape,"X:",self.x.shape,"W:",self.W.shape,"db",self.db.shape)
self.dout[:,self.i:self.i+1] = dout
dx = np.dot(self.dout,self.W.T)
self.dW[:,self.i:self.i+1] = np.dot(self.x.T,dout)
self.db[self.i] = np.sum(dout,axis=0)
#print("FirstPath-backward dout:",dout.shape,"X:",self.x.shape,"W:",self.W.shape,"db",self.db.shape,"dx:",dx.shape)
return dx
class LastPath(FirstPath):
def __init__(self,w,b,batch_size=0):
# w: 100,10 (in,out) b:10 (out) x: N*10
FirstPath._init(self,w,b,batch_size)
self.out = np.zeros((batch_size,w.shape[0]))
def DPLforward(self,x):
self.x = x
#print("LastPath-DPLforward x:",x.shape,"W:",self.W.shape,"b:",self.b.shape)
self.out[:,self.pre_i:self.pre_i+1] = x
out = np.dot(self.out,self.W)+self.b
return out
def DPLbackward(self,dout): #dout:N*10
#print("LastPath-DPLBackward dout:",dout.shape,"W:",self.W.shape,"db:",self.db.shape,"x:",self.x.shape)
dx = np.dot(dout,self.W[self.pre_i])
dx = dx[:,np.newaxis]
self.dW[self.pre_i][self.i] = np.dot(self.x.T,dout[:,self.i:self.i+1])
#self.dW[self.pre_i] = np.dot(self.x.T,dout)
self.db[self.i] = np.sum(dout[:,self.i:self.i+1],axis=0)
#self.db = np.sum(dout,axis=0)
#print("LastPath-Backward dout:",dout.shape,"W:",self.W.shape,"db:",self.db.shape,"x:",self.x.shape,"dx:",dx.shape)
return dx
class DPLPath(FirstPath):
def __init__(self,w,b,batch_size=0):
# w: 50,100 (in,out) b:100 (out) x: N
FirstPath._init(self,w,b,batch_size)
def DPLforward(self,x):
self.x = x
print("DPLPath-DPLforward x:",x.shape,"W:",self.W.shape,"b:",self.b.shape)
out = x*self.W[self.pre_i][self.i]+self.b[self.i]
#print("DPLPath-forward x:",x.shape,"W:",self.W.shape,"b:",self.b.shape,"out:",out.shape)
#print("DPLPath-forward x: ",x.T)
#print("DPLPath-forward out:",out.T)
return out
def DPLbackward(self,dout): #dout:N
#print("DPLPath-DPLbacward dout:",dout.shape,"W:",self.W.shape,"db:",self.db.shape,"x:",self.x.shape,"pre_i",self.pre_i,"i",self.i)
dx = dout*self.W[self.pre_i][self.i]
self.dW[self.pre_i][self.i] = np.dot(self.x.T,dout)
self.db[self.i] = np.sum(dout,axis=0)
#print("DPLPath-Bacward dout:",dout.shape,"W:",self.W.shape,"db:",self.db.shape,"x:",self.x.shape,"dx:",dx.shape)
return dx
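# A minimal sketch (illustrative only, not part of the original file) exercising the
# plain Affine layer: forward computes x.W + b, backward returns dx and fills dW/db.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = rng.randn(4, 50)             # batch of 4 samples, 50 features
    layer = Affine(rng.randn(50, 10), np.zeros(10))
    out = layer.forward(x)           # shape (4, 10)
    dx = layer.backward(np.ones_like(out))
    print(out.shape, dx.shape, layer.dW.shape, layer.db.shape)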
|
|
import pandas as pd
import numpy as np
class Developer(object):
"""
Pass the dataframe that is returned by feasibility here
Can also be a dictionary where keys are building forms and values are
the individual data frames returned by the proforma lookup routine.
"""
def __init__(self, feasibility):
if isinstance(feasibility, dict):
feasibility = pd.concat(feasibility.values(), keys=feasibility.keys(), axis=1)
self.feasibility = feasibility
@staticmethod
def _max_form(f, colname):
"""
Assumes dataframe with hierarchical columns with first index equal to the
use and second index equal to the attribute.
e.g. f.columns equal to::
mixedoffice building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
industrial building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
"""
df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
return df.idxmax(axis=1)
def keep_form_with_max_profit(self, forms=None):
"""
This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete which other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form.
"""
f = self.feasibility
if forms is not None:
f = f[forms]
mu = self._max_form(f, "max_profit")
indexes = [tuple(x) for x in mu.reset_index().values]
df = f.stack(level=0).loc[indexes]
df.index.names = ["parcel_id", "form"]
df = df.reset_index(level=1)
return df
@staticmethod
def compute_units_to_build(num_agents, num_units, target_vacancy):
"""
Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built
"""
print "Number of agents: {:,}".format(num_agents)
print "Number of agent spaces: {:,}".format(int(num_units))
assert target_vacancy < 1.0
target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
print "Current vacancy = {:.2f}".format(1 - num_agents / float(num_units))
print "Target vacancy = {:.2f}, target of new units = {:,}".\
format(target_vacancy, target_units)
return target_units
def pick(self, form, target_units, parcel_size, ave_unit_size,
current_units, max_parcel_size=200000, min_unit_size=400,
drop_after_build=True, residential=True, bldg_sqft_per_job=400.0):
"""
Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
Returns
-------
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility.
"""
if len(self.feasibility) == 0:
            # no feasible buildings, might as well bail
return None
if form is None:
df = self.feasibility
elif isinstance(form, list):
df = self.keep_form_with_max_profit(form)
else:
df = self.feasibility[form]
# feasible buildings only for this building type
df = df[df.max_profit_far > 0]
ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size
df["ave_unit_size"] = ave_unit_size
df["parcel_size"] = parcel_size
df['current_units'] = current_units
df = df[df.parcel_size < max_parcel_size]
df['residential_units'] = np.round(df.residential_sqft /
df.ave_unit_size)
df['job_spaces'] = np.round(df.non_residential_sqft / bldg_sqft_per_job)
if residential:
df['net_units'] = df.residential_units - df.current_units
else:
df['net_units'] = df.job_spaces - df.current_units
df = df[df.net_units > 0]
if len(df) == 0:
print "WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM"
return
# print "Describe of net units\n", df.net_units.describe()
print "Sum of net units that are profitable: {:,}".\
format(int(df.net_units.sum()))
if df.net_units.sum() < target_units:
print "WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO " \
"MATCH DEMAND"
choices = np.random.choice(df.index.values, size=len(df.index),
replace=False,
p=(df.max_profit.values / df.max_profit.sum()))
net_units = df.net_units.loc[choices]
tot_units = net_units.values.cumsum()
ind = int(np.searchsorted(tot_units, target_units, side="left"))
if target_units != 0:
ind += 1
ind = min(ind, len(choices))
build_idx = choices[:ind]
if drop_after_build:
self.feasibility = self.feasibility.drop(build_idx)
new_df = df.loc[build_idx]
new_df.index.name = "parcel_id"
return new_df.reset_index()
@staticmethod
def merge(old_df, new_df):
"""
Merge two dataframes of buildings. The old dataframe is
usually the buildings dataset and the new dataframe is a modified
(by the user) version of what is returned by the pick method.
Parameters
----------
old_df : dataframe
Current set of buildings
new_df : dataframe
New buildings to add, usually comes from this module
Returns
-------
df : dataframe
Combined DataFrame of buildings, makes sure indexes don't overlap
"""
maxind = np.max(old_df.index.values)
new_df = new_df.reset_index(drop=True)
new_df.index = new_df.index + maxind + 1
concat_df = pd.concat([old_df, new_df], verify_integrity=True)
concat_df.index.name = 'building_id'
return concat_df
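# A minimal sketch (illustrative only, not part of the original module) of the vacancy
# arithmetic in Developer.compute_units_to_build: with 1,000 agents, 900 existing units
# and a 5% target vacancy, roughly 1000 / 0.95 - 900 = 152 extra units are needed.
if __name__ == '__main__':
    new_units = Developer.compute_units_to_build(
        num_agents=1000, num_units=900, target_vacancy=0.05)
    assert new_units == 152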
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from eventlet import greenthread
from lxml import etree
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from nova.cloudpipe import pipelib
import nova.conf
from nova.i18n import _LI
from nova.i18n import _LW
import nova.virt.firewall as base_firewall
from nova.virt import netutils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
libvirt = None
class NWFilterFirewall(base_firewall.FirewallDriver):
"""This class implements a network filtering mechanism by using
libvirt's nwfilter.
    All instances get a filter ("nova-base") applied. This filter
provides some basic security such as protection against MAC
spoofing, IP spoofing, and ARP spoofing.
"""
def __init__(self, host, **kwargs):
"""Create an NWFilter firewall driver
:param host: nova.virt.libvirt.host.Host instance
:param kwargs: currently unused
"""
global libvirt
if libvirt is None:
try:
libvirt = importutils.import_module('libvirt')
except ImportError:
LOG.warning(_LW("Libvirt module could not be loaded. "
"NWFilterFirewall will not work correctly."))
self._host = host
self.static_filters_configured = False
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
def nova_no_nd_reflection_filter(self):
"""This filter protects false positives on IPv6 Duplicate Address
Detection(DAD).
"""
uuid = self._get_filter_uuid('nova-no-nd-reflection')
return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
<!-- no nd reflection -->
<!-- drop if destination mac is v6 mcast mac addr and
we sent it. -->
<uuid>%s</uuid>
<rule action='drop' direction='in'>
<mac dstmacaddr='33:33:00:00:00:00'
dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
</rule>
</filter>''' % uuid
def nova_dhcp_filter(self):
"""The standard allow-dhcp-server filter is an <ip> one, so it uses
ebtables to allow traffic through. Without a corresponding rule in
iptables, it'll get blocked anyway.
"""
uuid = self._get_filter_uuid('nova-allow-dhcp-server')
return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
<uuid>%s</uuid>
<rule action='accept' direction='out'
priority='100'>
<udp srcipaddr='0.0.0.0'
dstipaddr='255.255.255.255'
srcportstart='68'
dstportstart='67'/>
</rule>
<rule action='accept' direction='in'
priority='100'>
<udp srcipaddr='$DHCPSERVER'
srcportstart='67'
dstportstart='68'/>
</rule>
</filter>''' % uuid
def setup_basic_filtering(self, instance, network_info):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
instance=instance)
LOG.info(_LI('Ensuring static filters'), instance=instance)
self._ensure_static_filters()
nodhcp_base_filter = self.get_base_filter_list(instance, False)
dhcp_base_filter = self.get_base_filter_list(instance, True)
for vif in network_info:
_base_filter = nodhcp_base_filter
for subnet in vif['network']['subnets']:
if subnet.get_meta('dhcp_server'):
_base_filter = dhcp_base_filter
break
self._define_filter(self._get_instance_filter_xml(instance,
_base_filter,
vif))
def _get_instance_filter_parameters(self, vif):
parameters = []
def format_parameter(parameter, value):
return ("<parameter name='%s' value='%s'/>" % (parameter, value))
network = vif['network']
if not vif['network'] or not vif['network']['subnets']:
return parameters
v4_subnets = [s for s in network['subnets'] if s['version'] == 4]
v6_subnets = [s for s in network['subnets'] if s['version'] == 6]
for subnet in v4_subnets:
for ip in subnet['ips']:
parameters.append(format_parameter('IP', ip['address']))
dhcp_server = subnet.get_meta('dhcp_server')
if dhcp_server:
parameters.append(format_parameter('DHCPSERVER', dhcp_server))
for subnet in v6_subnets:
gateway = subnet.get('gateway')
if gateway:
ra_server = gateway['address'] + "/128"
parameters.append(format_parameter('RASERVER', ra_server))
if CONF.allow_same_net_traffic:
for subnet in v4_subnets:
ipv4_cidr = subnet['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
parameters.append(format_parameter('PROJNET', net))
parameters.append(format_parameter('PROJMASK', mask))
for subnet in v6_subnets:
ipv6_cidr = subnet['cidr']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
parameters.append(format_parameter('PROJNET6', net))
parameters.append(format_parameter('PROJMASK6', prefix))
return parameters
def _get_instance_filter_xml(self, instance, filters, vif):
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
parameters = self._get_instance_filter_parameters(vif)
uuid = self._get_filter_uuid(instance_filter_name)
xml = '''<filter name='%s' chain='root'>''' % instance_filter_name
xml += '<uuid>%s</uuid>' % uuid
for f in filters:
xml += '''<filterref filter='%s'>''' % f
xml += ''.join(parameters)
xml += '</filterref>'
xml += '</filter>'
return xml
def get_base_filter_list(self, instance, allow_dhcp):
"""Obtain a list of base filters to apply to an instance.
The return value should be a list of strings, each
specifying a filter name. Subclasses can override this
function to add additional filters as needed. Additional
filters added to the list must also be correctly defined
within the subclass.
"""
if pipelib.is_vpn_image(instance.image_ref):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
else:
base_filter = 'nova-nodhcp'
return [base_filter]
def _ensure_static_filters(self):
"""Static filters are filters that have no need to be IP aware.
There is no configuration or tuneability of these filters, so they
can be set up once and forgotten about.
"""
if self.static_filters_configured:
return
filter_set = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing']
self._define_filter(self.nova_no_nd_reflection_filter())
filter_set.append('nova-no-nd-reflection')
self._define_filter(self._filter_container('nova-nodhcp', filter_set))
filter_set.append('allow-dhcp-server')
self._define_filter(self._filter_container('nova-base', filter_set))
self._define_filter(self._filter_container('nova-vpn',
['allow-dhcp-server']))
self._define_filter(self.nova_dhcp_filter())
self.static_filters_configured = True
def _filter_container(self, name, filters):
uuid = self._get_filter_uuid(name)
xml = '''<filter name='%s' chain='root'>
<uuid>%s</uuid>
%s
</filter>''' % (name, uuid,
''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
return xml
def _get_filter_uuid(self, name):
try:
flt = self._conn.nwfilterLookupByName(name)
xml = flt.XMLDesc(0)
doc = etree.fromstring(xml)
u = doc.find("./uuid").text
except Exception as e:
LOG.debug(u"Cannot find UUID for filter '%(name)s': '%(e)s'",
{'name': name, 'e': e})
u = uuid.uuid4().hex
LOG.debug("UUID for filter '%s' is '%s'", name, u)
return u
def _define_filter(self, xml):
if callable(xml):
xml = xml()
try:
self._conn.nwfilterDefineXML(xml)
except libvirt.libvirtError as ex:
with excutils.save_and_reraise_exception() as ctxt:
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
# Since libvirt 1.2.7 this operation can fail if the filter
# with the same name already exists for the given uuid.
# Unfortunately there is not a specific error code for this
# so we have to parse the error message to see if that was
# the failure.
errmsg = ex.get_error_message()
if 'already exists with uuid' in errmsg:
ctxt.reraise = False
def unfilter_instance(self, instance, network_info):
"""Clear out the nwfilter rules."""
for vif in network_info:
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
_nw = self._conn.nwfilterLookupByName(instance_filter_name)
_nw.undefine()
break
except libvirt.libvirtError as e:
if cnt == max_retry - 1:
raise
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# This happens when the instance filter is still in use
# (ie. when the instance has not terminated properly)
LOG.info(_LI('Failed to undefine network filter '
'%(name)s. Try %(cnt)d of '
'%(max_retry)d.'),
{'name': instance_filter_name,
'cnt': cnt + 1,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
else:
LOG.debug('The nwfilter(%s) is not found.',
instance_filter_name, instance=instance)
break
@staticmethod
def _instance_filter_name(instance, nic_id=None):
if not nic_id:
return 'nova-instance-%s' % (instance.name)
return 'nova-instance-%s-%s' % (instance.name, nic_id)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
for vif in network_info:
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
try:
self._conn.nwfilterLookupByName(instance_filter_name)
except libvirt.libvirtError:
name = instance.name
                LOG.debug('The nwfilter(%(instance_filter_name)s) for '
                          '%(name)s is not found.',
{'instance_filter_name': instance_filter_name,
'name': name},
instance=instance)
return False
return True
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
def __init__(self, execute=None, **kwargs):
"""Create an IP tables firewall driver instance
:param execute: unused, pass None
:param kwargs: extra arguments
The @kwargs parameter must contain a key 'host' that
maps to an instance of the nova.virt.libvirt.host.Host
class.
"""
super(IptablesFirewallDriver, self).__init__(**kwargs)
self.nwfilter = NWFilterFirewall(kwargs['host'])
def setup_basic_filtering(self, instance, network_info):
"""Set up basic NWFilter."""
self.nwfilter.setup_basic_filtering(instance, network_info)
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def unfilter_instance(self, instance, network_info):
# NOTE(salvatore-orlando):
# Overriding base class method for applying nwfilter operation
if self.instance_info.pop(instance.id, None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)
else:
LOG.info(_LI('Attempted to unfilter instance which is not '
'filtered'), instance=instance)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
return self.nwfilter.instance_filter_exists(instance, network_info)
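# A minimal sketch (illustrative assumption, not part of nova) showing the XML that
# NWFilterFirewall._filter_container composes. The stub host below is hypothetical;
# with no real libvirt connection, _get_filter_uuid simply falls back to a random UUID.
if __name__ == '__main__':
    class _StubHost(object):
        def get_connection(self):
            return object()
    firewall = NWFilterFirewall(_StubHost())
    print(firewall._filter_container('nova-base',
                                     ['no-mac-spoofing', 'no-ip-spoofing',
                                      'no-arp-spoofing', 'allow-dhcp-server']))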
|
|
#
# Django-Lets-go
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django import template
from django.utils.safestring import mark_safe
from django.utils.datastructures import SortedDict
from django_lets_go.common_functions import word_capital
import copy
import json
register = template.Library()
@register.filter(name='wordcap')
def wordcap(value):
"""
    Capitalizes the first character of each word.
"""
return word_capital(value)
@register.filter(name='mul')
def mul(value, arg):
"""Multiplication
>>> mul(2, 2)
4
"""
return value * arg
mul.is_safe = True
@register.filter(name='div')
def div(value, arg):
"""Division
>>> div(4, 2)
2
"""
if arg is None:
return 0
    elif arg == 0:
return 0
else:
return value / arg
@register.filter(name='subtract')
def subtract(value, arg):
"""Subtraction
>>> subtract(4, 2)
2
"""
return value - arg
@register.simple_tag(name='percentage_tag')
def percentage_tag(fraction, population):
"""Usage: {% percentage_tag fraction population %}"""
try:
return "%.2f%%" % ((float(fraction) / float(population)) * 100)
except:
return "0.00%"
@register.filter(name='percent')
def percent(value):
"""Percentage with % sign
>>> percent(1)
'100.0 %'
"""
return str(round(value * 100, 2)) + " %"
@register.filter(name='profit_in_percentage')
def profit_in_percentage(value, arg):
"""Profit Percentage with % sign
>>> profit_in_percentage(2, 1)
'100.0 %'
"""
val = value - arg
return str(round(val * 100, 2)) + " %"
@register.filter(name='cal_width')
def cal_width(value, max):
"""Get width
>>> cal_width(70, 100)
140.0
"""
if not value or not max:
return "None"
width = (value / float(max)) * 200
return width
@register.filter(name='time_in_min')
def time_in_min(value, arg):
"""Convert value in min or second format
>>> time_in_min(130, 'min')
'02:10 min'
>>> time_in_min(130, 'sec')
'130 sec'
"""
try:
value = int(value)
except:
value = 0
if value != 0:
if arg == 'min':
min = int(value / 60)
sec = int(value % 60)
return "%02d" % min + ":" + "%02d" % sec + " min"
else:
min = int(value / 60)
min = (min * 60)
sec = int(value % 60)
total_sec = min + sec
return str(total_sec) + " sec"
else:
return str("00:00 min")
@register.filter(name='conv_min')
def conv_min(value):
"""Convert value in min:sec format
>>> conv_min(130)
'02:10'
"""
try:
value = int(value)
except:
value = 0
if value != 0:
min = int(value / 60)
sec = int(value % 60)
return "%02d" % min + ":" + "%02d" % sec
else:
return "00:00"
@register.filter(name='month_name')
def month_name(value, arg):
"""Get month name from 1-12 int no
>>> month_name(2, 1)
'Feb 1'
"""
month_dict = {1: "Jan", 2: "Feb", 3: "Mar",
4: "Apr", 5: "May", 6: "Jun",
7: "Jul", 8: "Aug", 9: "Sep",
10: "Oct", 11: "Nov", 12: "Dec"}
no = int(value)
m_name = month_dict[no]
return str(m_name) + " " + str(arg)
@register.filter(name='to_json')
def to_json(value):
return mark_safe(json.dumps(value))
@register.inclusion_tag('sort_link_frag.html', takes_context=True)
def sort_link(context, link_text, sort_field, visible_name=None):
"""Usage: {% sort_link "link text" "field_name" %}
Usage: {% sort_link "link text" "field_name" "Visible name" %}
"""
is_sorted = False
sort_order = None
orig_sort_field = sort_field
if context.get('current_sort_field') == sort_field:
sort_field = '-%s' % sort_field
visible_name = '-%s' % (visible_name or orig_sort_field)
is_sorted = True
sort_order = 'down'
elif context.get('current_sort_field') == '-' + sort_field:
visible_name = '%s' % (visible_name or orig_sort_field)
is_sorted = True
sort_order = 'up'
if visible_name:
if 'request' in context:
request = context['request']
request.session[visible_name] = sort_field
if 'getsortvars' in context:
extra_vars = context['getsortvars']
else:
if 'request' in context:
request = context['request']
getvars = request.GET.copy()
if 'sort_by' in getvars:
del getvars['sort_by']
if len(getvars.keys()) > 0:
context['getsortvars'] = "&%s" % getvars.urlencode()
else:
context['getsortvars'] = ''
extra_vars = context['getsortvars']
else:
extra_vars = ''
return {'link_text': link_text, 'sort_field': sort_field,
'extra_vars': extra_vars, 'sort_order': sort_order,
'is_sorted': is_sorted, 'visible_name': visible_name
}
def get_fieldset(parser, token):
"""Usage: {% get_fieldset field1,field2 as list_field from xyz_form %}
{% for list_field in xyz_form %}
{% endfor %}
"""
try:
name, fields, as_, variable_name, from_, form = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('bad arguments for %r' %
token.split_contents()[0])
return FieldSetNode(fields.split(','), variable_name, form)
get_fieldset = register.tag(get_fieldset)
class FieldSetNode(template.Node):
def __init__(self, fields, variable_name, form_variable):
self.fields = fields
self.variable_name = variable_name
self.form_variable = form_variable
def render(self, context):
form = template.Variable(self.form_variable).resolve(context)
new_form = copy.copy(form)
new_form.fields = SortedDict(
[(key, value) for key, value in form.fields.items() if key in self.fields])
context[self.variable_name] = new_form
return u''
class ArgumentError(ValueError):
"""Missing or incompatible argument."""
def _regroup_table(seq, rows=None, columns=None):
if not (rows or columns):
raise ArgumentError("Missing one of rows or columns")
if columns:
rows = (len(seq) // columns) + 1
table = [seq[i::rows] for i in range(rows)]
# Pad out short rows
n = len(table[0])
return [row + [None for x in range(n - len(row))] for row in table]
@register.filter(name='groupby_rows')
def groupby_rows(seq, n):
"""Returns a list of n lists. Each sub-list is the same length.
Short lists are padded with None. This is useful for creating HTML tables
from a sequence.
>>> groupby_rows(range(1, 11), 3)
[[1, 4, 7, 10], [2, 5, 8, None], [3, 6, 9, None]]
"""
return _regroup_table(seq, rows=int(n))
@register.filter(name='groupby_columns')
def groupby_columns(seq, n):
"""Returns a list of lists where each sub-list has n items.
Short lists are padded with None. This is useful for creating HTML tables
from a sequence.
>>> groupby_columns(range(1, 11), 3)
[[1, 5, 9], [2, 6, 10], [3, 7, None], [4, 8, None]]
"""
return _regroup_table(seq, columns=int(n))
@register.filter(name='sort')
def listsort(value):
"""Sort list
>>> value = {'a': 1, 'c': 3, 'd': 4, 'b': 2}
>>> listsort(value)
{'a': 1, 'b': 2, 'c': 3, 'd': 4}
"""
if isinstance(value, dict):
new_dict = SortedDict()
key_list = value.keys()
key_list.sort()
for key in key_list:
new_dict[key] = value[key]
return new_dict
elif isinstance(value, list):
new_list = list(value)
new_list.sort()
return new_list
else:
return value
listsort.is_safe = True
@register.filter(name='convert_to_int')
def convert_to_int(val):
"""
Return int value
"""
try:
return int(val)
except:
return val
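# A minimal sketch (illustrative only, not part of the original tag library) calling a
# few of the pure-Python filters directly; outside of templates they behave like plain
# functions.
if __name__ == '__main__':
    assert mul(2, 3) == 6
    assert subtract(10, 4) == 6
    assert conv_min(130) == '02:10'
    assert groupby_rows(range(1, 11), 3) == [[1, 4, 7, 10], [2, 5, 8, None], [3, 6, 9, None]]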
|
|
import socket
import httplib, urllib
import sys, traceback
from threading import *
import logging
from Queue import Queue, Empty
import exceptions
import sys
import time
if float(sys.version[:3]) >= 2.6:
import json
else:
# python 2.4 or 2.5 can also import simplejson
# as working alternative to the json included.
import simplejson as json
class thread_job_worker(Thread):
"""Thread executing tasks from a given tasks queue"""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.callbacks = { 'task_done' : {}}
        self.daemon = True
        self.connectionString = None
        self.log = logging.getLogger("thread_job_worker")
        self.conn = None
        # Start the thread only after all attributes are set, so run() never
        # sees a partially initialised worker.
        self.start()
def cbAddTaskDone(self,funct):
self.callbacks['task_done'][funct] = 1
def taskDone(self,request):
self.tasks.task_done()
self.log.debug( 'taskDone')
for func in self.callbacks['task_done']:
func(request)
# Now process nicely
return
def processRequest(self,request):
time.sleep(1)
return
def run(self):
while self.daemon:
request = self.tasks.get()
self.log.debug('Running')
reponce = self.processRequest(request)
if reponce != None:
request['responce'] = reponce
self.taskDone(request)
def ConnectionSet(self,connectionStr):
#print "connectionStr", connectionStr
Changed = False
if not hasattr(self,'connectionString'):
Changed = True
self.connectionString = connectionStr
if self.connectionString != connectionStr:
Changed = True
self.connectionString = connectionStr
if not hasattr(self,'conn'):
Changed = True
elif self.conn == None:
Changed = True
self.conn = None
class threadpool_jobs:
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads = 10):
self.log = logging.getLogger("threadpool_jobs")
self.tasks = Queue(num_threads)
self.preTasks = Queue()
self.arrayOfthread_job_worker = []
self.taskCacheRunning = {}
self.preTaskCache = {}
self.callbacks = {
"messagesToProcess" : {},
}
self.taskCacheFinished = {}
self.postTasks = Queue()
self.blockedMessages = {}
for item in range(num_threads):
new = thread_job_worker(self.tasks)
new.cbAddTaskDone(self.handleTaskDoneByThread)
self.arrayOfthread_job_worker.append(new)
def cbAddOnMessagesToProcess(self,function):
self.callbacks['messagesToProcess'][function] = 1
def cbDoOnMessagesToProcess(self):
for item in self.callbacks["messagesToProcess"]:
item(self)
def handleTaskDoneByThread(self,request):
self.log.debug('handleTaskDoneByThread')
msgHash = request['params'].__hash__()
if msgHash in self.taskCacheRunning.keys():
del(self.taskCacheRunning[msgHash])
#self.log.debug(request)
if not 'responce' in request.keys():
self.log.error('no reponce')
self.log.debug("request=%s" % (request))
# we will requeue the responce
if not 'retries' in request.keys():
request['retries'] = 0
request['retries'] += 1
if request['retries'] > 2:
self.blockedMessages[msgHash] = request
return
if msgHash in self.preTaskCache.keys():
self.preTaskCache[msgHash]['retries'] = request['retries']
else:
self.preTaskCache[msgHash] = request
self.preTasks.put(request)
return
self.taskCacheFinished[msgHash] = request
self.postTasks.put(msgHash)
self.cbDoOnMessagesToProcess()
def wait_completion(self):
"""Wait for completion of all the tasks in the queue"""
self.log.error("Wait for completion of all the tasks in the queue")
self.tasks.join()
def SendMessage(self,func,message, *args, **kargs):
#Sends a message now without Queuing
#for a aproved thread
request = {
'function' : func,
'params' : message,
'args' : args,
'kargs' : kargs,
}
msgHash = request['params'].__hash__()
if msgHash in self.taskCacheRunning.keys():
self.log.error("SendMessage Overwriting task")
self.taskCacheRunning[msgHash] = request
self.tasks.put(request)
self.QueueProcessPreTask()
def QueueProcessPreTask(self):
# For calling to place messages on
#self.log.debug( 'self.preTasks.qsize=%s' % ( self.preTasks.qsize()))
while True:
try:
request = self.preTasks.get_nowait()
except Empty:
break
msgHash = request['params'].__hash__()
self.taskCacheRunning[msgHash] = request
if msgHash in self.preTaskCache.keys():
del self.preTaskCache[msgHash]
#print request
self.tasks.put(request)
self.preTasks.task_done()
#self.log.debug('self.preTasks.qsize=%s' % ( self.preTasks.qsize()))
#self.log.debug('self.tasks.qsize=%s' % ( self.tasks.qsize()))
def QueueProcessResponces(self):
# To be processed by external thead
counter = 0
#self.log.debug('self.postTasks.qsize=%s' % ( self.postTasks.qsize()))
while True:
try:
msgHash = self.postTasks.get_nowait()
except Empty:
break
if not msgHash in self.taskCacheFinished.keys():
self.postTasks.task_done()
continue
func = self.taskCacheFinished[msgHash]['function']
params = self.taskCacheFinished[msgHash]['params']
args = self.taskCacheFinished[msgHash]['args']
kargs = self.taskCacheFinished[msgHash]['kargs']
rep = self.taskCacheFinished[msgHash]['responce']
if func != None:
func(rep,params,*args, **kargs)
#try: func(rep)
#except Exception, e:
# print e
# #traceback.print_tb(e, limit=1, file=sys.stdout)
del(self.taskCacheFinished[msgHash])
self.postTasks.task_done()
#self.log.debug('self.postTasks.qsize=%s' % ( self.postTasks.qsize()) )
def QueueProcessAddMessage(self,func,message, *args, **kargs):
messageDict = {}
if message.__hash__() in self.preTaskCache.keys():
self.log.debug('preTaskCache dedupe is working')
return
request = {
'function' : func,
'params' : message,
'args' : args,
'kargs' : kargs,
}
self.preTaskCache[message.__hash__()] = request
self.preTasks.put(request)
#self.log.debug('adding self.preTasks.qsize=%s' % ( self.preTasks.qsize()))
def testcbDone(one):
print one
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
import time
log = logging.getLogger("main")
    # The original snippet referenced `model` and `sConTPool`, which are not defined
    # in this module; use the threadpool_jobs class defined above instead. The
    # message is serialised to a JSON string because SendMessage() uses it as a
    # hashable cache key.
    poool = threadpool_jobs(2)
    msg = json.dumps({
        "method": "slim.request",
        "params": [
            "-",
            [
                "player",
                "count",
                "?"
            ]
        ]
    })
    poool.SendMessage(testcbDone, msg)
    log.error('poool.tasks.qsize=%s' % (poool.tasks.qsize()))
time.sleep(20)
|
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import StringIO, os, re
import warnings
import ConfigParser
import boto
# If running in Google App Engine there is no "user" and
# os.path.expanduser() will fail. Attempt to detect this case and use a
# no-op expanduser function in this case.
try:
os.path.expanduser('~')
expanduser = os.path.expanduser
except (AttributeError, ImportError):
# This is probably running on App Engine.
expanduser = (lambda x: x)
# By default we use two locations for the boto configurations,
# /etc/boto.cfg and ~/.boto (which works on Windows and Unix).
BotoConfigPath = '/etc/boto.cfg'
BotoConfigLocations = [BotoConfigPath]
UserConfigPath = os.path.join(expanduser('~'), '.boto')
BotoConfigLocations.append(UserConfigPath)
# If there's a BOTO_CONFIG variable set, we load ONLY
# that variable
if 'BOTO_CONFIG' in os.environ:
BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]
# If there's a BOTO_PATH variable set, we use anything there
# as the current configuration locations, split with colons
elif 'BOTO_PATH' in os.environ:
BotoConfigLocations = []
for path in os.environ['BOTO_PATH'].split(":"):
BotoConfigLocations.append(expanduser(path))
class Config(ConfigParser.SafeConfigParser):
def __init__(self, path=None, fp=None, do_load=True):
ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami',
'debug' : '0'})
if do_load:
if path:
self.load_from_path(path)
elif fp:
self.readfp(fp)
else:
self.read(BotoConfigLocations)
if "AWS_CREDENTIAL_FILE" in os.environ:
full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
try:
self.load_credential_file(full_path)
except IOError:
warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
def load_credential_file(self, path):
"""Load a credential file as is setup like the Java utilities"""
c_data = StringIO.StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
self.readfp(c_data)
def load_from_path(self, path):
file = open(path)
for line in file.readlines():
match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
if match:
extended_file = match.group(1)
(dir, file) = os.path.split(path)
self.load_from_path(os.path.join(dir, extended_file))
self.read(path)
def save_option(self, path, section, option, value):
"""
Write the specified Section.Option to the config file specified by path.
Replace any previous value. If the path doesn't exist, create it.
        Also add the option to the in-memory config.
"""
config = ConfigParser.SafeConfigParser()
config.read(path)
if not config.has_section(section):
config.add_section(section)
config.set(section, option, value)
fp = open(path, 'w')
config.write(fp)
fp.close()
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
def save_user_option(self, section, option, value):
self.save_option(UserConfigPath, section, option, value)
def save_system_option(self, section, option, value):
self.save_option(BotoConfigPath, section, option, value)
def get_instance(self, name, default=None):
try:
val = self.get('Instance', name)
except:
val = default
return val
def get_user(self, name, default=None):
try:
val = self.get('User', name)
except:
val = default
return val
def getint_user(self, name, default=0):
try:
val = self.getint('User', name)
except:
val = default
return val
def get_value(self, section, name, default=None):
return self.get(section, name, default)
def get(self, section, name, default=None):
try:
val = ConfigParser.SafeConfigParser.get(self, section, name)
except:
val = default
return val
def getint(self, section, name, default=0):
try:
val = ConfigParser.SafeConfigParser.getint(self, section, name)
except:
val = int(default)
return val
def getfloat(self, section, name, default=0.0):
try:
val = ConfigParser.SafeConfigParser.getfloat(self, section, name)
except:
val = float(default)
return val
def getbool(self, section, name, default=False):
if self.has_option(section, name):
val = self.get(section, name)
if val.lower() == 'true':
val = True
else:
val = False
else:
val = default
return val
def setbool(self, section, name, value):
if value:
self.set(section, name, 'true')
else:
self.set(section, name, 'false')
def dump(self):
s = StringIO.StringIO()
self.write(s)
print s.getvalue()
def dump_safe(self, fp=None):
if not fp:
fp = StringIO.StringIO()
for section in self.sections():
fp.write('[%s]\n' % section)
for option in self.options(section):
if option == 'aws_secret_access_key':
fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
else:
fp.write('%s = %s\n' % (option, self.get(section, option)))
def dump_to_sdb(self, domain_name, item_name):
try:
import simplejson as json
except ImportError:
import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
if not domain:
domain = sdb.create_domain(domain_name)
item = domain.new_item(item_name)
item.active = False
for section in self.sections():
d = {}
for option in self.options(section):
d[option] = self.get(section, option)
item[section] = json.dumps(d)
item.save()
def load_from_sdb(self, domain_name, item_name):
try:
import json
except ImportError:
import simplejson as json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
item = domain.get_item(item_name)
for section in item.keys():
if not self.has_section(section):
self.add_section(section)
d = json.loads(item[section])
for attr_name in d.keys():
attr_value = d[attr_name]
if attr_value == None:
attr_value = 'None'
if isinstance(attr_value, bool):
self.setbool(section, attr_name, attr_value)
else:
self.set(section, attr_name, attr_value)
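# A minimal sketch (illustrative only, not part of boto) of Config's defaulting getters,
# loading from an in-memory file object instead of the usual config locations.
if __name__ == '__main__':
    _sample = StringIO.StringIO("[Boto]\ndebug = 2\nis_secure = true\n")
    _cfg = Config(fp=_sample)
    print _cfg.get('Boto', 'missing_option', 'fallback')   # -> fallback
    print _cfg.getint('Boto', 'debug')                     # -> 2
    print _cfg.getbool('Boto', 'is_secure')                # -> True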
|
|
# Twisted Imports
from twisted.internet import reactor, defer, task
from twisted.python import log, failure
# Sibling Imports
from .error import NotRunning, AlreadyRunning, NotPaused, Stopped
# Package Imports
from ..constants import State
from ..events import EventEmitter
def init_child (parent, child):
from .sequence import Sequence
if child is None:
child = Sequence([])
elif isinstance(child, BaseStep):
pass
else:
try:
child = Sequence(child)
except TypeError:
raise Error("Argument must be an instance of Step or a list of Steps")
child.on("all", parent._bubbleEvent)
return child
class Runnable (object):
"""
Objects that can be run or reset.
"""
def run (self, parent = None):
if self.state is not State.READY:
raise AlreadyRunning
self.state = State.RUNNING
self.parent = parent
return defer.maybeDeferred(self._run)
def reset (self):
if self.state in (State.RUNNING, State.PAUSED):
raise AlreadyRunning
self.state = State.READY
self._onResume = None
return defer.maybeDeferred(self._reset)
@property
def root (self):
obj = self
while obj.parent is not None:
obj = obj.parent
return obj
def _run (self):
pass
def _reset (self):
pass
class Pausable (object):
def pause (self):
if self.state is not State.RUNNING:
raise NotRunning
self.state = State.PAUSED
return defer.maybeDeferred(self._pause)
def resume (self):
if self.state is not State.PAUSED:
raise NotPaused
self.state = State.RUNNING
d = defer.maybeDeferred(self._resume)
try:
onResume, self._onResume = self._onResume, None
onResume()
except (AttributeError, TypeError):
pass
return d
def _pause (self):
pass
def _resume (self):
pass
class Cancellable (object):
def cancel (self, abort = False):
"""
Stop gracefully, cancelling a loop or moving on to next step.
"""
if self.state not in (State.RUNNING, State.PAUSED):
raise NotRunning
self._onResume = None
self.state = State.CANCELLED
return defer.maybeDeferred(self._cancel, abort)
def abort (self):
"""
        Stop non-gracefully, raising an error.
"""
try:
return self.cancel(abort = True)
except NotRunning:
return defer.succeed(None)
def _pause (self):
pass
def _resume (self):
pass
def _cancel (self, abort = False):
pass
class BaseStep (Runnable, Pausable, Cancellable):
pass
class Dependent (Runnable, Pausable, Cancellable, EventEmitter):
def __init__ (self):
self.state = State.READY
class Looping (Runnable, Pausable, Cancellable):
"""
Subclass this to create step or dependent objects that involve iteration.
"""
def __init__ (self, max_calls = None):
self._max_calls = max_calls
self._calls = 0
def _run (self):
# Don't run at all if max_calls was set to 0.
if self._max_calls is not None and self._max_calls == 0:
return self._iteration_complete()
self._calls = 0
self._iteration_start()
def _test (self):
"""
        Override to control whether or not _call() is run on each cycle.
        Return True to run the call, False to skip it.
"""
return True
def _iterate (self):
def _done (result):
self._schedule()
def _error (failure):
            self.state = State.ERROR
try:
if self.state is State.PAUSED:
self._iteration_stop()
self._onResume = self._iteration_start
elif self.state is State.CANCELLED:
self._iteration_stop()
return
elif (self._max_calls is not None and self._max_calls > 0 and self._calls >= int(self._max_calls)):
raise StopIteration
elif self.state is State.RUNNING and self._test():
d = self._call()
if d is not None:
self._calls += 1
d.addCallbacks(_done, _error)
else:
self._schedule()
except StopIteration:
self._iteration_stop()
self._iteration_complete()
except Exception as e:
self._iteration_stop()
self._iteration_error(e)
def _schedule (self):
"""
Executed when each loop is complete, if another loop is required.
"""
pass
def _call (self):
"""
Executed on each loop
"""
pass
def _iteration_start (self):
"""
Starts the loop running
"""
self._iterate()
def _iteration_stop (self):
"""
Stops the loop.
        Triggered if _test() or _iterate() raises StopIteration,
or if state is PAUSED or CANCELLED.
"""
pass
def _iteration_complete (self):
"""
Called when the loop finishes.
This is either when max_calls has been reached
or when StopIteration is raised by _test() or _iterate().
"""
pass
def _iteration_error (self, error):
"""
Called if an error other than StopIteration is raised within
_test() or _iterate()
"""
# NB also called if an error is raised by _iteration_stop()!
pass
def _cancel (self, abort = False):
self._iteration_stop()
def _reset (self):
self._iteration_stop()
self._calls = 0
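# --- Illustrative sketch (not part of the original module): a minimal Looping
# subclass showing the protocol driven by _iterate(). The class and attribute
# names below are hypothetical.
class _ExampleCounter (Looping):
    def __init__ (self, max_calls = 3, delay = 0.1):
        Looping.__init__(self, max_calls)
        # Looping does not set an initial state itself; Runnable.run() expects READY.
        self.state = State.READY
        self._delay = delay
    def _call (self):
        # Returning a Deferred makes _iterate() count the call and wait for it.
        return task.deferLater(reactor, 0, lambda: None)
    def _schedule (self):
        # Ask the reactor to run the next iteration after a short delay.
        reactor.callLater(self._delay, self._iterate)
    def _iteration_complete (self):
        self.state = State.COMPLETE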
class Caller (EventEmitter):
def __init__ (self, fn, fnArgs = None, fnKeywords = None):
self._fn = fn
self._args = fnArgs or ()
self._kwargs = fnKeywords or {}
self._step = None
def _bubbleEvent (self, event, data):
# If a parent is allocated, pass on any events.
# Always pass on log events
if event == "log" or self.parent is not None:
self.emit(event, **data)
def _call (self):
if isinstance(self._fn, BaseStep):
self._step = self._fn
elif callable(self._fn):
# Act based on the result of fn().
result = self._fn(*self._args, **self._kwargs)
if isinstance(result, defer.Deferred):
# Tick will wait for the Deferred before cycling again.
self._step = result
return result
elif isinstance(result, BaseStep):
self._step = result
else:
return defer.succeed(result)
# fn was not callable.
else:
return None
# We have a Runnable to run.
d = defer.Deferred()
def remove_events (passthrough):
# Clean up after the step is complete
self._step.off("all", self._bubbleEvent)
self._step = None
if isinstance(passthrough, failure.Failure):
d.errback(passthrough)
else:
d.callback(passthrough)
self._step.on("all", self._bubbleEvent)
# Run the step
self._step.reset()
self._step.run(parent = self.parent).addBoth(remove_events)
return d
def _cancel (self, abort = False):
self._iteration_stop()
# If fn returned a Deferred, continue to wait on it
if isinstance(self._step, defer.Deferred):
return self._step
try:
return self._step.cancel(abort)
except AttributeError:
pass # step might be None
except NotRunning:
pass # no problem
def _pause (self):
try:
return self._step.pause()
except AttributeError:
pass # step might be None
except NotRunning:
pass # no problem
def _resume (self):
try:
return self._step.resume()
except AttributeError:
pass # step might be None
except NotRunning:
pass # no problem
#
# Idea 2 - allow dependents to be attached to multiple parents
# run() increments the run count, cancel() decrements it.
# if run count > 0, run.
# pause() pauses but doesn't raise error if already paused
# resume() returns to run state, also doesn't raise errors.
#
class Tick (Caller, Looping, Dependent):
"""
This is a dependent that runs a function periodically.
The function can return a value, a Deferred, or a BaseStep to be executed.
The function can also be a BaseStep to be executed.
If the function is or returns a BaseStep then the BaseStep must complete
before the next iteration can begin, no matter how long it takes.
Log entries for the BaseStep are passed on to the step to which the
dependent is attached.
If Tick is run with a parent parameter then all events are passed on
to the parent.
"""
def __init__ (self, fn, interval, now = True, max_calls = None, fnArgs = None, fnKeywords = None):
"""
Initialise a Tick.
@type fn: callable or BaseStep.
@param fn: The function or sequence to execute.
@param interval: Frequency in seconds to call fn.
        @param now: If True, call fn immediately instead of waiting one interval before the first call.
@param max_calls: Maximum number of times to run the function. Pass None for unlimited.
@param fnArgs: Arguments to pass to fn.
@param fnKeywords: Keyword arguments to pass to fn.
"""
Dependent.__init__(self)
Caller.__init__(self, fn, fnArgs, fnKeywords)
Looping.__init__(self, max_calls)
self._interval = float(interval)
self._now = bool(now)
self._c = task.LoopingCall(self._iterate)
def _schedule (self):
pass
def _iteration_start (self):
self._c.start(self._interval, now = self._now)
def _iteration_stop (self):
if self._c and self._c.running:
self._c.stop()
def _iteration_complete (self):
self.state = State.COMPLETE
def _iteration_error (self, error):
self.state = State.ERROR
log.err(error)
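# --- Illustrative usage sketch (not part of the original module). `poll_sensor`
# is a hypothetical callable; the reactor must be running for the LoopingCall
# to fire.
def _example_tick_usage ():
    def poll_sensor ():
        log.msg("polling")
    tick = Tick(poll_sensor, interval = 2, now = True, max_calls = 5)
    d = tick.run()
    # tick.pause(), tick.resume() and tick.cancel() control the loop thereafter.
    return d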
class Trigger (Caller, Looping, Dependent):
"""
This is a dependent that runs a function as soon as a test evaluates to True.
The function can return a value, a Deferred, or a Runnable to be executed.
The function can also be a Runnable to be executed.
If the function is or returns a Runnable then the Runnable must complete
before the next iteration can begin, no matter how long it takes.
Log entries for the Runnable are passed on to the step to which the
dependent is attached.
If Trigger is run with a parent parameter then all events are passed on
to the parent.
"""
def __init__ (self, expr, fn, max_calls = None, fnArgs = None, fnKeywords = None):
"""
Initialise a Trigger.
@type expr: Expression (or value; but that would be relatively pointless).
@type fn: callable or Runnable.
@param fn: The function or sequence to execute.
@param max_calls: Maximum number of times to run the function. Pass None for unlimited.
@param fnArgs: Arguments to pass to fn.
@param fnKeywords: Keyword arguments to pass to fn.
"""
Dependent.__init__(self)
Caller.__init__(self, fn, fnArgs, fnKeywords)
Looping.__init__(self, max_calls)
self._expr = expr
def _cancel (self, abort = False):
Looping._cancel(self, abort)
Caller._cancel(self, abort)
def _test (self, data = None):
return bool(self._expr)
def _schedule (self):
self._expr.once("change", self._iterate)
def _iteration_stop (self):
try:
self._expr.off("change", self._iterate)
except KeyError:
pass
def _iteration_complete (self):
self.state = State.COMPLETE
def _iteration_error (self, error):
self.state = State.ERROR
log.err(error)
class Dependents (Dependent):
def __init__ (self):
Dependent.__init__(self)
self._dependents = set()
def _bubbleEvent (self, event, data):
self.emit(event, **data)
def add (self, dep):
if hasattr(dep, "container") and dep.container is not None:
raise Exception("Dependent is already assigned")
self._dependents.add(dep)
dep.container = self
dep.on("all", self._bubbleEvent)
if self.state is State.RUNNING:
dep.run()
return dep
def remove (self, dependent):
try:
self._dependents.remove(dependent)
except KeyError:
pass
else:
if self.state in (State.RUNNING, State.PAUSED):
dependent.cancel()
dependent.container = None
dep.off("all", self._bubbleEvent)
# Runnable
def _run (self):
for d in self._dependents:
try:
d.run()
except AlreadyRunning:
pass
except Exception as e:
return defer.fail(e)
return defer.succeed(None)
def _reset (self):
r = []
for d in self._dependents:
try:
r.append(d.reset())
except AlreadyRunning:
                r.append(d.cancel().addCallback(lambda _: d.reset()))  # May take a while - should return a Deferred, so all of these functions should return Deferreds (like run?)
return defer.gatherResults(r)
# Pausable
def _pause (self):
r = []
for d in self._dependents:
try:
r.append(d.pause())
except NotRunning:
pass
return defer.gatherResults(r)
def _resume (self):
r = []
for d in self._dependents:
try:
r.append(d.resume())
except NotPaused:
pass
return defer.gatherResults(r)
# Cancelable
def _cancel (self, abort = False):
r = []
for d in self._dependents:
try:
r.append(d.cancel(abort))
except NotRunning:
pass
return defer.gatherResults(r)
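# --- Illustrative sketch (not part of the original module): grouping dependents
# so they can be started, paused and cancelled together. `poll_fast` and
# `poll_slow` are hypothetical callables.
def _example_dependents_usage (poll_fast, poll_slow):
    group = Dependents()
    group.add(Tick(poll_fast, interval = 5, now = True))
    group.add(Tick(poll_slow, interval = 60, now = False))
    # run() starts every attached dependent; cancel() stops them all.
    return group.run()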
|
|
from __future__ import division, unicode_literals
import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree
from .fragment import FragmentFD
from ..compat import (
compat_urlparse,
compat_urllib_error,
)
from ..utils import (
encodeFilename,
sanitize_open,
struct_pack,
struct_unpack,
xpath_text,
)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return struct_unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return struct_unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return struct_unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
        if size == 1:
            # A 32-bit size of 1 means a 64-bit extended size follows the box
            # type, so the header occupies 16 bytes instead of 8.
            real_size = self.read_unsigned_long_long()
            header_end = 16
return real_size, box_type, self.read(real_size - header_end)
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
flags = self.read_unsigned_char()
live = flags & 0x20 != 0
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
'live': live,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
fragments_counter = itertools.count(first_frag_number)
for segment, fragments_count in segment_run_table['segment_run']:
for _ in range(fragments_count):
res.append((segment, next(fragments_counter)))
    if boot_info['live']:
        # For live streams, start near the live edge: keep only the last two fragments.
        res = res[-2:]
return res
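# Worked example (illustrative): for a non-live bootstrap whose segment run
# table is [(1, 4)] (segment 1 containing four fragments) and whose first
# fragment run entry has 'first' == 1, build_fragments_list returns
# [(1, 1), (1, 2), (1, 3), (1, 4)]; for a live stream only the last two
# entries would be kept.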
def write_unsigned_int(stream, val):
stream.write(struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(struct_pack('!I', val)[1:])
def write_flv_header(stream):
"""Writes the FLV header to stream"""
    # FLV header
    stream.write(b'FLV\x01')  # signature + version 1
    stream.write(b'\x05')  # type flags: audio and video present
    stream.write(b'\x00\x00\x00\x09')  # header size (9 bytes)
    stream.write(b'\x00\x00\x00\x00')  # PreviousTagSize0
def write_metadata_tag(stream, metadata):
"""Writes optional metadata tag to stream"""
SCRIPT_TAG = b'\x12'
FLV_TAG_HEADER_LEN = 11
    if metadata:
        stream.write(SCRIPT_TAG)
        write_unsigned_int_24(stream, len(metadata))  # tag data size
        # timestamp (3 bytes) + extended timestamp (1 byte) + stream id (3 bytes), all zero
        stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
        stream.write(metadata)
        # trailing PreviousTagSize = tag header (11 bytes) + tag data
        write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class F4mFD(FragmentFD):
"""
A downloader for f4m manifests or AdobeHDS.
"""
FD_NAME = 'f4m'
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
self.report_error('No media found')
for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
doc.findall(_add_ns('drmAdditionalHeaderSet'))):
# If id attribute is missing it's valid for all media nodes
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
if 'id' not in e.attrib:
self.report_error('Missing ID in f4m DRM')
media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
'drmAdditionalHeaderSetId' not in e.attrib,
media))
if not media:
self.report_error('Unsupported DRM')
return media
def _get_bootstrap_from_url(self, bootstrap_url):
bootstrap = self.ydl.urlopen(bootstrap_url).read()
return read_bootstrap_info(bootstrap)
def _update_live_fragments(self, bootstrap_url, latest_fragment):
fragments_list = []
retries = 30
while (not fragments_list) and (retries > 0):
boot_info = self._get_bootstrap_from_url(bootstrap_url)
fragments_list = build_fragments_list(boot_info)
fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
if not fragments_list:
# Retry after a while
time.sleep(5.0)
retries -= 1
if not fragments_list:
self.report_error('Failed to update fragments')
return fragments_list
def _parse_bootstrap_node(self, node, base_url):
if node.text is None:
bootstrap_url = compat_urlparse.urljoin(
base_url, node.attrib['url'])
boot_info = self._get_bootstrap_from_url(bootstrap_url)
else:
bootstrap_url = None
bootstrap = base64.b64decode(node.text.encode('ascii'))
boot_info = read_bootstrap_info(bootstrap)
return (boot_info, bootstrap_url)
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
manifest = self.ydl.urlopen(man_url).read()
doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, base_url)
live = boot_info['live']
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text.encode('ascii'))
else:
metadata = None
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
ctx = {
'filename': filename,
'total_frags': total_frags,
}
self._prepare_frag_download(ctx)
dest_stream = ctx['dest_stream']
write_flv_header(dest_stream)
if not live:
write_metadata_tag(dest_stream, metadata)
self._start_frag_download(ctx)
frags_filenames = []
while fragments_list:
seg_i, frag_i = fragments_list.pop(0)
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
if akamai_pv:
url += '?' + akamai_pv.strip(';')
if info_dict.get('extra_param_to_segment_url'):
url += info_dict.get('extra_param_to_segment_url')
frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
try:
success = ctx['dl'].download(frag_filename, {'url': url})
if not success:
return False
(down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
down_data = down.read()
down.close()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
if box_type == b'mdat':
dest_stream.write(box_data)
break
if live:
os.remove(encodeFilename(frag_sanitized))
else:
frags_filenames.append(frag_sanitized)
except (compat_urllib_error.HTTPError, ) as err:
if live and (err.code == 404 or err.code == 410):
# We didn't keep up with the live window. Continue
# with the next available fragment.
msg = 'Fragment %d unavailable' % frag_i
self.report_warning(msg)
fragments_list = []
else:
raise
if not fragments_list and live and bootstrap_url:
fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
total_frags += len(fragments_list)
if fragments_list and (fragments_list[0][1] > frag_i + 1):
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
self.report_warning(msg)
self._finish_frag_download(ctx)
for frag_file in frags_filenames:
os.remove(encodeFilename(frag_file))
return True
|
|
import inspect
import os
import pytest
from sanic.blueprints import Blueprint
@pytest.fixture(scope="module")
def static_file_directory():
"""The static directory to serve"""
current_file = inspect.getfile(inspect.currentframe())
current_directory = os.path.dirname(os.path.abspath(current_file))
static_directory = os.path.join(current_directory, "static")
return static_directory
def get_file_path(static_file_directory, file_name):
return os.path.join(static_file_directory, file_name)
def get_file_content(static_file_directory, file_name):
"""The content of the static file to check"""
with open(get_file_path(static_file_directory, file_name), "rb") as file:
return file.read()
@pytest.mark.parametrize(
"file_name", ["test.file", "decode me.txt", "python.png"]
)
def test_static_file(app, static_file_directory, file_name):
app.static(
"/testing.file", get_file_path(static_file_directory, file_name)
)
app.static(
"/testing2.file",
get_file_path(static_file_directory, file_name),
name="testing_file",
)
uri = app.url_for("static")
uri2 = app.url_for("static", filename="any")
uri3 = app.url_for("static", name="static", filename="any")
assert uri == "/testing.file"
assert uri == uri2
assert uri2 == uri3
request, response = app.test_client.get(uri)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static("/testing.file", get_file_path(static_file_directory, file_name))
bp.static(
"/testing2.file",
get_file_path(static_file_directory, file_name),
name="testing_file",
)
app.blueprint(bp)
uri = app.url_for("static", name="test_bp_static.static")
uri2 = app.url_for("static", name="test_bp_static.static", filename="any")
uri3 = app.url_for("test_bp_static.static")
uri4 = app.url_for("test_bp_static.static", name="any")
uri5 = app.url_for("test_bp_static.static", filename="any")
uri6 = app.url_for("test_bp_static.static", name="any", filename="any")
assert uri == "/bp/testing.file"
assert uri == uri2
assert uri2 == uri3
assert uri3 == uri4
assert uri4 == uri5
assert uri5 == uri6
request, response = app.test_client.get(uri)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
# test for other parameters
uri = app.url_for("static", _external=True, _server="http://localhost")
assert uri == "http://localhost/testing.file"
uri = app.url_for('static', _external=True, _server='http://localhost',
_port=80)
assert uri == 'http://localhost/testing.file'
uri = app.url_for('static', _external=True, _server='https://localhost',
_port=443)
assert uri == 'https://localhost/testing.file'
uri = app.url_for('static', _external=True, _server='http://localhost',
_port=8080)
assert uri == 'http://localhost:8080/testing.file'
uri = app.url_for('static', _external=True, _server='https://localhost',
_port=4433)
assert uri == 'https://localhost:4433/testing.file'
uri = app.url_for('static', name='test_bp_static.static',
_external=True, _server='http://localhost')
assert uri == 'http://localhost/bp/testing.file'
uri = app.url_for('static', name='test_bp_static.static',
_external=True, _server='http://localhost', _port=80)
assert uri == 'http://localhost/bp/testing.file'
uri = app.url_for('static', name='test_bp_static.static',
_external=True, _server='https://localhost', _port=443)
assert uri == 'https://localhost/bp/testing.file'
uri = app.url_for('static', name='test_bp_static.static',
_external=True, _server='http://localhost', _port=8080)
assert uri == 'http://localhost:8080/bp/testing.file'
uri = app.url_for('static', name='test_bp_static.static',
_external=True, _server='https://localhost', _port=4433)
assert uri == 'https://localhost:4433/bp/testing.file'
# test for defined name
uri = app.url_for("static", name="testing_file")
assert uri == "/testing2.file"
request, response = app.test_client.get(uri)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
uri = app.url_for("static", name="test_bp_static.testing_file")
assert uri == "/bp/testing2.file"
assert uri == app.url_for(
"static", name="test_bp_static.testing_file", filename="any"
)
request, response = app.test_client.get(uri)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
@pytest.mark.parametrize("base_uri", ["/static", "", "/dir"])
def test_static_directory(app, file_name, base_uri, static_file_directory):
app.static(base_uri, static_file_directory)
base_uri2 = base_uri + "/2"
app.static(base_uri2, static_file_directory, name="uploads")
uri = app.url_for("static", name="static", filename=file_name)
assert uri == "{}/{}".format(base_uri, file_name)
request, response = app.test_client.get(uri)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
uri2 = app.url_for("static", name="static", filename="/" + file_name)
uri3 = app.url_for("static", filename=file_name)
uri4 = app.url_for("static", filename="/" + file_name)
uri5 = app.url_for("static", name="uploads", filename=file_name)
uri6 = app.url_for("static", name="uploads", filename="/" + file_name)
assert uri == uri2
assert uri2 == uri3
assert uri3 == uri4
assert uri5 == "{}/{}".format(base_uri2, file_name)
assert uri5 == uri6
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static(base_uri, static_file_directory)
bp.static(base_uri2, static_file_directory, name="uploads")
app.blueprint(bp)
uri = app.url_for(
"static", name="test_bp_static.static", filename=file_name
)
uri2 = app.url_for(
"static", name="test_bp_static.static", filename="/" + file_name
)
uri4 = app.url_for(
"static", name="test_bp_static.uploads", filename=file_name
)
uri5 = app.url_for(
"static", name="test_bp_static.uploads", filename="/" + file_name
)
assert uri == "/bp{}/{}".format(base_uri, file_name)
assert uri == uri2
assert uri4 == "/bp{}/{}".format(base_uri2, file_name)
assert uri4 == uri5
request, response = app.test_client.get(uri)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_head_request(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
app.blueprint(bp)
uri = app.url_for("static")
assert uri == "/testing.file"
assert uri == app.url_for("static", name="static")
assert uri == app.url_for("static", name="static", filename="any")
request, response = app.test_client.head(uri)
assert response.status == 200
assert "Accept-Ranges" in response.headers
assert "Content-Length" in response.headers
assert int(response.headers["Content-Length"]) == len(
get_file_content(static_file_directory, file_name)
)
# blueprint
uri = app.url_for("static", name="test_bp_static.static")
assert uri == "/bp/testing.file"
assert uri == app.url_for(
"static", name="test_bp_static.static", filename="any"
)
request, response = app.test_client.head(uri)
assert response.status == 200
assert "Accept-Ranges" in response.headers
assert "Content-Length" in response.headers
assert int(response.headers["Content-Length"]) == len(
get_file_content(static_file_directory, file_name)
)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_correct(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
app.blueprint(bp)
headers = {"Range": "bytes=12-19"}
uri = app.url_for("static")
assert uri == "/testing.file"
assert uri == app.url_for("static", name="static")
assert uri == app.url_for("static", name="static", filename="any")
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
12:20
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
# blueprint
uri = app.url_for("static", name="test_bp_static.static")
assert uri == "/bp/testing.file"
assert uri == app.url_for(
"static", name="test_bp_static.static", filename="any"
)
assert uri == app.url_for("test_bp_static.static")
assert uri == app.url_for("test_bp_static.static", name="any")
assert uri == app.url_for("test_bp_static.static", filename="any")
assert uri == app.url_for(
"test_bp_static.static", name="any", filename="any"
)
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
12:20
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_front(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
app.blueprint(bp)
headers = {"Range": "bytes=12-"}
uri = app.url_for("static")
assert uri == "/testing.file"
assert uri == app.url_for("static", name="static")
assert uri == app.url_for("static", name="static", filename="any")
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
12:
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
# blueprint
uri = app.url_for("static", name="test_bp_static.static")
assert uri == "/bp/testing.file"
assert uri == app.url_for(
"static", name="test_bp_static.static", filename="any"
)
assert uri == app.url_for("test_bp_static.static")
assert uri == app.url_for("test_bp_static.static", name="any")
assert uri == app.url_for("test_bp_static.static", filename="any")
assert uri == app.url_for(
"test_bp_static.static", name="any", filename="any"
)
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
12:
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_back(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
app.blueprint(bp)
headers = {"Range": "bytes=-12"}
uri = app.url_for("static")
assert uri == "/testing.file"
assert uri == app.url_for("static", name="static")
assert uri == app.url_for("static", name="static", filename="any")
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
-12:
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
# blueprint
uri = app.url_for("static", name="test_bp_static.static")
assert uri == "/bp/testing.file"
assert uri == app.url_for(
"static", name="test_bp_static.static", filename="any"
)
assert uri == app.url_for("test_bp_static.static")
assert uri == app.url_for("test_bp_static.static", name="any")
assert uri == app.url_for("test_bp_static.static", filename="any")
assert uri == app.url_for(
"test_bp_static.static", name="any", filename="any"
)
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
-12:
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_empty(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
app.blueprint(bp)
uri = app.url_for("static")
assert uri == "/testing.file"
assert uri == app.url_for("static", name="static")
assert uri == app.url_for("static", name="static", filename="any")
request, response = app.test_client.get(uri)
assert response.status == 200
assert "Content-Length" in response.headers
assert "Content-Range" not in response.headers
assert int(response.headers["Content-Length"]) == len(
get_file_content(static_file_directory, file_name)
)
assert response.body == bytes(
get_file_content(static_file_directory, file_name)
)
# blueprint
uri = app.url_for("static", name="test_bp_static.static")
assert uri == "/bp/testing.file"
assert uri == app.url_for(
"static", name="test_bp_static.static", filename="any"
)
assert uri == app.url_for("test_bp_static.static")
assert uri == app.url_for("test_bp_static.static", name="any")
assert uri == app.url_for("test_bp_static.static", filename="any")
assert uri == app.url_for(
"test_bp_static.static", name="any", filename="any"
)
request, response = app.test_client.get(uri)
assert response.status == 200
assert "Content-Length" in response.headers
assert "Content-Range" not in response.headers
assert int(response.headers["Content-Length"]) == len(
get_file_content(static_file_directory, file_name)
)
assert response.body == bytes(
get_file_content(static_file_directory, file_name)
)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_error(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
bp = Blueprint("test_bp_static", url_prefix="/bp")
bp.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
app.blueprint(bp)
headers = {"Range": "bytes=1-0"}
uri = app.url_for("static")
assert uri == "/testing.file"
assert uri == app.url_for("static", name="static")
assert uri == app.url_for("static", name="static", filename="any")
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 416
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
assert response.headers["Content-Range"] == "bytes */%s" % (
len(get_file_content(static_file_directory, file_name)),
)
# blueprint
uri = app.url_for("static", name="test_bp_static.static")
assert uri == "/bp/testing.file"
assert uri == app.url_for(
"static", name="test_bp_static.static", filename="any"
)
assert uri == app.url_for("test_bp_static.static")
assert uri == app.url_for("test_bp_static.static", name="any")
assert uri == app.url_for("test_bp_static.static", filename="any")
assert uri == app.url_for(
"test_bp_static.static", name="any", filename="any"
)
request, response = app.test_client.get(uri, headers=headers)
assert response.status == 416
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
assert response.headers["Content-Range"] == "bytes */%s" % (
len(get_file_content(static_file_directory, file_name)),
)
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from testtools import matchers
from keystone.tests.unit import test_v3
class TestExtensionCase(test_v3.RestfulTestCase):
EXTENSION_NAME = 'endpoint_filter'
EXTENSION_TO_ADD = 'endpoint_filter_extension'
def config_overrides(self):
super(TestExtensionCase, self).config_overrides()
self.config_fixture.config(
group='catalog', driver='endpoint_filter.sql')
def setUp(self):
super(TestExtensionCase, self).setUp()
self.default_request_url = (
'/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id})
class EndpointFilterCRUDTestCase(TestExtensionCase):
def test_create_endpoint_project_association(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid endpoint and project id test case.
"""
self.put(self.default_request_url,
expected_status=204)
def test_create_endpoint_project_association_with_invalid_project(self):
"""PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
expected_status=404)
def test_create_endpoint_project_association_with_invalid_endpoint(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
expected_status=404)
def test_create_endpoint_project_association_with_unexpected_body(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Unexpected body in request. The body should be ignored.
"""
self.put(self.default_request_url,
body={'project_id': self.default_domain_project_id},
expected_status=204)
def test_check_endpoint_project_association(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project and endpoint id test case.
"""
self.put(self.default_request_url,
expected_status=204)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id},
expected_status=204)
def test_check_endpoint_project_association_with_invalid_project(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
expected_status=404)
def test_check_endpoint_project_association_with_invalid_endpoint(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
expected_status=404)
def test_list_endpoints_associated_with_valid_project(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoints
Valid project and endpoint id test case.
"""
self.put(self.default_request_url)
resource_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': self.default_domain_project_id}
r = self.get(resource_url)
self.assertValidEndpointListResponse(r, self.endpoint,
resource_url=resource_url)
def test_list_endpoints_associated_with_invalid_project(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoints
Invalid project id test case.
"""
self.put(self.default_request_url)
self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': uuid.uuid4().hex},
expected_status=404)
def test_list_projects_associated_with_endpoint(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Valid endpoint-project association test case.
"""
self.put(self.default_request_url)
resource_url = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {
'endpoint_id': self.endpoint_id}
r = self.get(resource_url)
self.assertValidProjectListResponse(r, self.default_domain_project,
resource_url=resource_url)
def test_list_projects_with_no_endpoint_project_association(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Valid endpoint id but no endpoint-project associations test case.
"""
r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': self.endpoint_id},
expected_status=200)
self.assertValidProjectListResponse(r, expected_length=0)
def test_list_projects_associated_with_invalid_endpoint(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Invalid endpoint id test case.
"""
self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': uuid.uuid4().hex},
expected_status=404)
def test_remove_endpoint_project_association(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project id and endpoint id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id},
expected_status=204)
def test_remove_endpoint_project_association_with_invalid_project(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
expected_status=404)
def test_remove_endpoint_project_association_with_invalid_endpoint(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
expected_status=404)
def test_endpoint_project_association_cleanup_when_project_deleted(self):
self.put(self.default_request_url)
association_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': self.endpoint_id})
r = self.get(association_url, expected_status=200)
self.assertValidProjectListResponse(r, expected_length=1)
self.delete('/projects/%(project_id)s' % {
'project_id': self.default_domain_project_id})
r = self.get(association_url, expected_status=200)
self.assertValidProjectListResponse(r, expected_length=0)
def test_endpoint_project_association_cleanup_when_endpoint_deleted(self):
self.put(self.default_request_url)
association_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': self.default_domain_project_id}
r = self.get(association_url, expected_status=200)
self.assertValidEndpointListResponse(r, expected_length=1)
self.delete('/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint_id})
r = self.get(association_url, expected_status=200)
self.assertValidEndpointListResponse(r, expected_length=0)
class EndpointFilterTokenRequestTestCase(TestExtensionCase):
def test_project_scoped_token_using_endpoint_filter(self):
"""Verify endpoints from project scoped token filtered."""
# create a project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_default_scoped_token_using_endpoint_filter(self):
"""Verify endpoints from default scoped token filtered."""
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_scoped_token_with_no_catalog_using_endpoint_filter(self):
"""Verify endpoint filter does not affect no catalog."""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_invalid_endpoint_project_association(self):
"""Verify an invalid endpoint-project association is handled."""
# add first endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
# create a second temporary endpoint
self.endpoint_id2 = uuid.uuid4().hex
self.endpoint2 = self.new_endpoint_ref(service_id=self.service_id)
self.endpoint2['id'] = self.endpoint_id2
self.catalog_api.create_endpoint(
self.endpoint_id2,
self.endpoint2.copy())
# add second endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id2},
expected_status=204)
# remove the temporary reference
# this will create inconsistency in the endpoint filter table
# which is fixed during the catalog creation for token request
self.catalog_api.delete_endpoint(self.endpoint_id2)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_disabled_endpoint(self):
"""Test that a disabled endpoint is handled."""
# Add an enabled endpoint to the default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
# Add a disabled endpoint to the default project.
# Create a disabled endpoint that's like the enabled one.
disabled_endpoint_ref = copy.copy(self.endpoint)
disabled_endpoint_id = uuid.uuid4().hex
disabled_endpoint_ref.update({
'id': disabled_endpoint_id,
'enabled': False,
'interface': 'internal'
})
self.catalog_api.create_endpoint(disabled_endpoint_id,
disabled_endpoint_ref)
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': disabled_endpoint_id},
expected_status=204)
# Authenticate to get token with catalog
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
endpoints = r.result['token']['catalog'][0]['endpoints']
endpoint_ids = [ep['id'] for ep in endpoints]
self.assertEqual([self.endpoint_id], endpoint_ids)
def test_multiple_endpoint_project_associations(self):
def _create_an_endpoint():
endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
r = self.post('/endpoints', body={'endpoint': endpoint_ref})
return r.result['endpoint']['id']
# create three endpoints
endpoint_id1 = _create_an_endpoint()
endpoint_id2 = _create_an_endpoint()
_create_an_endpoint()
# only associate two endpoints with project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint_id1},
expected_status=204)
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint_id2},
expected_status=204)
# there should be only two endpoints in token catalog
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=2)
def test_get_auth_catalog_using_endpoint_filter(self):
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
token_data = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
token_data,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
auth_catalog = self.get('/auth/catalog',
token=token_data.headers['X-Subject-Token'])
self.assertEqual(token_data.result['token']['catalog'],
auth_catalog.result['catalog'])
class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
JSON_HOME_DATA = {
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/endpoint_projects': {
'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects',
'href-vars': {
'endpoint_id':
'http://docs.openstack.org/api/openstack-identity/3/param/'
'endpoint_id',
},
},
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/endpoint_groups': {
'href': '/OS-EP-FILTER/endpoint_groups',
},
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/endpoint_group': {
'href-template': '/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}',
'href-vars': {
'endpoint_group_id':
'http://docs.openstack.org/api/openstack-identity/3/'
'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
},
},
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/endpoint_group_to_project_association': {
'href-template': '/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/projects/{project_id}',
'href-vars': {
'project_id':
'http://docs.openstack.org/api/openstack-identity/3/param/'
'project_id',
'endpoint_group_id':
'http://docs.openstack.org/api/openstack-identity/3/'
'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
},
},
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/projects_associated_with_endpoint_group': {
'href-template': '/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/projects',
'href-vars': {
'endpoint_group_id':
'http://docs.openstack.org/api/openstack-identity/3/'
'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
},
},
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/endpoints_in_endpoint_group': {
'href-template': '/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/endpoints',
'href-vars': {
'endpoint_group_id':
'http://docs.openstack.org/api/openstack-identity/3/'
'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
},
},
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/project_endpoint_groups': {
'href-template': '/OS-EP-FILTER/projects/{project_id}/'
'endpoint_groups',
'href-vars': {
'project_id':
'http://docs.openstack.org/api/openstack-identity/3/param/'
'project_id',
},
},
}
class EndpointGroupCRUDTestCase(TestExtensionCase):
DEFAULT_ENDPOINT_GROUP_BODY = {
'endpoint_group': {
'description': 'endpoint group description',
'filters': {
'interface': 'admin'
},
'name': 'endpoint_group_name'
}
}
DEFAULT_ENDPOINT_GROUP_URL = '/OS-EP-FILTER/endpoint_groups'
def test_create_endpoint_group(self):
"""POST /OS-EP-FILTER/endpoint_groups
Valid endpoint group test case.
"""
r = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
body=self.DEFAULT_ENDPOINT_GROUP_BODY)
expected_filters = (self.DEFAULT_ENDPOINT_GROUP_BODY
['endpoint_group']['filters'])
expected_name = (self.DEFAULT_ENDPOINT_GROUP_BODY
['endpoint_group']['name'])
self.assertEqual(expected_filters,
r.result['endpoint_group']['filters'])
self.assertEqual(expected_name, r.result['endpoint_group']['name'])
self.assertThat(
r.result['endpoint_group']['links']['self'],
matchers.EndsWith(
'/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': r.result['endpoint_group']['id']}))
def test_create_invalid_endpoint_group(self):
"""POST /OS-EP-FILTER/endpoint_groups
Invalid endpoint group creation test case.
"""
invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'}
self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
body=invalid_body,
expected_status=400)
def test_get_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case.
"""
# create an endpoint group to work with
response = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
body=self.DEFAULT_ENDPOINT_GROUP_BODY)
endpoint_group_id = response.result['endpoint_group']['id']
endpoint_group_filters = response.result['endpoint_group']['filters']
endpoint_group_name = response.result['endpoint_group']['name']
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
        response = self.get(url)
self.assertEqual(endpoint_group_id,
response.result['endpoint_group']['id'])
self.assertEqual(endpoint_group_filters,
response.result['endpoint_group']['filters'])
self.assertEqual(endpoint_group_name,
response.result['endpoint_group']['name'])
self.assertThat(response.result['endpoint_group']['links']['self'],
matchers.EndsWith(url))
def test_get_invalid_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case.
"""
endpoint_group_id = 'foobar'
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.get(url, expected_status=404)
def test_check_endpoint_group(self):
"""HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Valid endpoint_group_id test case.
"""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.head(url, expected_status=200)
def test_check_invalid_endpoint_group(self):
"""HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Invalid endpoint_group_id test case.
"""
endpoint_group_id = 'foobar'
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.head(url, expected_status=404)
def test_patch_endpoint_group(self):
"""PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group patch test case.
"""
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'region_id': 'UK'}
body['endpoint_group']['name'] = 'patch_test'
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
r = self.patch(url, body=body)
self.assertEqual(endpoint_group_id,
r.result['endpoint_group']['id'])
self.assertEqual(body['endpoint_group']['filters'],
r.result['endpoint_group']['filters'])
self.assertThat(r.result['endpoint_group']['links']['self'],
matchers.EndsWith(url))
def test_patch_nonexistent_endpoint_group(self):
"""PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group patch test case.
"""
body = {
'endpoint_group': {
'name': 'patch_test'
}
}
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': 'ABC'}
self.patch(url, body=body, expected_status=404)
def test_patch_invalid_endpoint_group(self):
"""PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group patch test case.
"""
body = {
'endpoint_group': {
'description': 'endpoint group description',
'filters': {
'region': 'UK'
},
'name': 'patch_test'
}
}
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.patch(url, body=body, expected_status=400)
# Perform a GET call to ensure that the content remains
# the same (as DEFAULT_ENDPOINT_GROUP_BODY) after attempting to update
# with an invalid filter
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
r = self.get(url)
del r.result['endpoint_group']['id']
del r.result['endpoint_group']['links']
self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result)
def test_delete_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case.
"""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.delete(url)
self.get(url, expected_status=404)
def test_delete_invalid_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case.
"""
endpoint_group_id = 'foobar'
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.delete(url, expected_status=404)
def test_add_endpoint_group_to_project(self):
"""Create a valid endpoint group and project association."""
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id,
self.project_id)
def test_add_endpoint_group_to_project_with_invalid_project_id(self):
"""Create an invalid endpoint group and project association."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(
endpoint_group_id, project_id)
self.put(url, expected_status=404)
def test_get_endpoint_group_in_project(self):
"""Test retrieving project endpoint group association."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
self.put(url)
response = self.get(url)
self.assertEqual(
endpoint_group_id,
response.result['project_endpoint_group']['endpoint_group_id'])
self.assertEqual(
self.project_id,
response.result['project_endpoint_group']['project_id'])
def test_get_invalid_endpoint_group_in_project(self):
"""Test retrieving project endpoint group association."""
endpoint_group_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(
endpoint_group_id, project_id)
self.get(url, expected_status=404)
def test_list_endpoint_groups_in_project(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
self.put(url)
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
{'project_id': self.project_id})
response = self.get(url)
self.assertEqual(
endpoint_group_id,
response.result['endpoint_groups'][0]['id'])
def test_list_endpoint_groups_in_invalid_project(self):
"""Test retrieving from invalid project."""
project_id = uuid.uuid4().hex
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
{'project_id': project_id})
self.get(url, expected_status=404)
def test_empty_endpoint_groups_in_project(self):
"""Test when no endpoint groups associated with the project."""
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
{'project_id': self.project_id})
response = self.get(url)
self.assertEqual(0, len(response.result['endpoint_groups']))
def test_check_endpoint_group_to_project(self):
"""Test HEAD with a valid endpoint group and project association."""
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id,
self.project_id)
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
self.head(url, expected_status=200)
def test_check_endpoint_group_to_project_with_invalid_project_id(self):
"""Test HEAD with an invalid endpoint group and project association."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create an endpoint group to project association
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
self.put(url)
# send a head request with an invalid project id
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(
endpoint_group_id, project_id)
self.head(url, expected_status=404)
def test_list_endpoint_groups(self):
"""GET /OS-EP-FILTER/endpoint_groups."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# recover all endpoint groups
url = '/OS-EP-FILTER/endpoint_groups'
r = self.get(url)
self.assertNotEmpty(r.result['endpoint_groups'])
self.assertEqual(endpoint_group_id,
r.result['endpoint_groups'][0].get('id'))
def test_list_projects_associated_with_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects
Valid endpoint group test case.
"""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
self._create_endpoint_group_project_association(endpoint_group_id,
self.project_id)
# recover list of projects associated with endpoint group
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
'/projects' %
{'endpoint_group_id': endpoint_group_id})
self.get(url)
def test_list_endpoints_associated_with_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints
Valid endpoint group test case.
"""
# create a service
service_ref = self.new_service_ref()
response = self.post(
'/services',
body={'service': service_ref})
service_id = response.result['service']['id']
# create an endpoint
endpoint_ref = self.new_endpoint_ref(service_id=service_id)
response = self.post(
'/endpoints',
body={'endpoint': endpoint_ref})
endpoint_id = response.result['endpoint']['id']
# create an endpoint group
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'service_id': service_id}
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, body)
# create association
self._create_endpoint_group_project_association(endpoint_group_id,
self.project_id)
# recover list of endpoints associated with endpoint group
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
'/endpoints' % {'endpoint_group_id': endpoint_group_id})
r = self.get(url)
self.assertNotEmpty(r.result['endpoints'])
self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id'))
def test_list_endpoints_associated_with_project_endpoint_group(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoints
Valid project, endpoint id, and endpoint group test case.
"""
# create a temporary service
service_ref = self.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id2 = response.result['service']['id']
# create additional endpoints
self._create_endpoint_and_associations(
self.default_domain_project_id, service_id2)
self._create_endpoint_and_associations(
self.default_domain_project_id)
# create project and endpoint association with default endpoint:
self.put(self.default_request_url)
# create an endpoint group that contains a different endpoint
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'service_id': service_id2}
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, body)
# associate endpoint group with project
self._create_endpoint_group_project_association(
endpoint_group_id, self.default_domain_project_id)
# Now get a list of the filtered endpoints
endpoints_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': self.default_domain_project_id}
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
self.assertEqual(2, len(endpoints))
# Now remove project endpoint group association
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.default_domain_project_id)
self.delete(url)
# Now remove endpoint group
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.delete(url)
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
self.assertEqual(1, len(endpoints))
def test_endpoint_group_project_cleanup_with_project(self):
# create endpoint group
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create new project and associate with endpoint_group
project_ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': project_ref})
project = self.assertValidProjectResponse(r, project_ref)
url = self._get_project_endpoint_group_url(endpoint_group_id,
project['id'])
self.put(url)
# check that we can recover the project endpoint group association
self.get(url)
# Now delete the project and then try and retrieve the project
# endpoint group association again
self.delete('/projects/%(project_id)s' % {
'project_id': project['id']})
self.get(url, expected_status=404)
def test_endpoint_group_project_cleanup_with_endpoint_group(self):
# create endpoint group
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create new project and associate with endpoint_group
project_ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': project_ref})
project = self.assertValidProjectResponse(r, project_ref)
url = self._get_project_endpoint_group_url(endpoint_group_id,
project['id'])
self.put(url)
# check that we can recover the project endpoint group association
self.get(url)
# now remove the project endpoint group association
self.delete(url)
self.get(url, expected_status=404)
def test_removing_an_endpoint_group_project(self):
# create an endpoint group
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create an endpoint_group project
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.default_domain_project_id)
self.put(url)
# remove the endpoint group project
self.delete(url)
self.get(url, expected_status=404)
def test_remove_endpoint_group_with_project_association(self):
# create an endpoint group
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create an endpoint_group project
project_endpoint_group_url = self._get_project_endpoint_group_url(
endpoint_group_id, self.default_domain_project_id)
self.put(project_endpoint_group_url)
# remove endpoint group, the associated endpoint_group project will
# be removed as well.
endpoint_group_url = ('/OS-EP-FILTER/endpoint_groups/'
'%(endpoint_group_id)s'
% {'endpoint_group_id': endpoint_group_id})
self.delete(endpoint_group_url)
self.get(endpoint_group_url, expected_status=404)
self.get(project_endpoint_group_url, expected_status=404)
def _create_valid_endpoint_group(self, url, body):
r = self.post(url, body=body)
return r.result['endpoint_group']['id']
def _create_endpoint_group_project_association(self,
endpoint_group_id,
project_id):
url = self._get_project_endpoint_group_url(endpoint_group_id,
project_id)
self.put(url)
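# The helper below builds the association URL used throughout these tests; for
# illustration (hypothetical IDs), it produces something like
# /OS-EP-FILTER/endpoint_groups/1234/projects/abcd.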
def _get_project_endpoint_group_url(self,
endpoint_group_id,
project_id):
return ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
'/projects/%(project_id)s' %
{'endpoint_group_id': endpoint_group_id,
'project_id': project_id})
def _create_endpoint_and_associations(self, project_id, service_id=None):
"""Creates an endpoint associated with service and project."""
if not service_id:
# create a new service
service_ref = self.new_service_ref()
response = self.post(
'/services', body={'service': service_ref})
service_id = response.result['service']['id']
# create endpoint
endpoint_ref = self.new_endpoint_ref(service_id=service_id)
response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint = response.result['endpoint']
# now add endpoint to project
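# NOTE: the association is made against self.project['id']; the project_id
# argument is currently unused by this helper.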
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint['id']})
return endpoint
|
|
"""
Project Bluebox
2015, University of Stuttgart, IPVS/AS
"""
from _collections_abc import Iterator
from itertools import count
import abc
"""
Project Bluebox
Copyright (C) <2015> <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
from flask import Flask, render_template, request, Response
from werkzeug import secure_filename
from SwiftConnect import SwiftConnect
import json, logging, os, time, datetime
import appConfig
n = 0
def set_globvar_to_one():
# NOTE: despite its name, this advances the global pagination index n by 6.
global n  # needed to modify the global copy of n (the index)
n = n + 6
# initialize logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(module)s - %(levelname)s ##\t %(message)s')
log = logging.getLogger()
# Initialize the Flask application
app = Flask(__name__)
# Instantiating SwiftClient
swift = SwiftConnect(appConfig.swift_type, appConfig.swift_url, appConfig.swift_user, appConfig.swift_pw)
##########################################################################################
"""
This route will show a form to perform an AJAX request
jQuery is loaded to execute the request and update the
value of the operation
"""
@app.route('/')
def index():
return render_template('index.html')
@app.route('/nextpage')
def index2():
return render_template('index2.html')
##########################################################################################
"""
get the list of containers , we get value of marker from the front end (java script)
"""
@app.route('/swift/containers', methods=['GET'])
def getContainers():
print("inside container list")
log.debug("inside container list")
m =request.args.get('marker','')
print(m)
ctss= swift.containerList(marker=m)
j = json.dumps(ctss,sort_keys=True)
return Response(j, mimetype='application/json')
##########################################################################################
##########################################################################################
# """
# get the list of containers
# """
# @app.route('/swift/Display', methods=['GET'])
# def Display(ctss):
#
# j = json.dumps(ctss,sort_keys=True)
# return Response(j, mimetype='application/json')
##########################################################################################
##########################################################################################
"""
# get the list of next containers
# """
# @app.route('/swift/containers/next', methods=['GET'])
# def getNextContainers():
#
#
# print("hello")
# print("hello")
# cts= swift.containerList()
# # mark=""
# list=6
# set_globvar_to_one()
# print(n)
# ctss= swift.containerListLimit(list,n)
#
# j = json.dumps(ctss,sort_keys=True)
# return Response(j, mimetype='application/json')
##########################################################################################
"""
create the Container
"""
##########################################################################################
@app.route('/create', methods=['POST'])
def create():
folderName = request.form['containerName']
print(folderName)
swift.createContainer(folderName)
return Response(None)
##########################################################################################
"""
get the list of all objects in a container
"""
@app.route('/swift/containers/<containerName>/objects', methods=['GET'])
def getObjectsInContainer(containerName):
marker = request.args.get('marker', '')
log.debug('marker: ' + marker)
log.debug(containerName)
cts = swift.fileList(containerName, marker=marker)
f = json.dumps(cts, sort_keys=True)
return Response(f, mimetype='application/json')
"""
parse objects size
"""
def parseObjects(container):
x = swift.ObjectList(container);
log.debug(x)
print("inside container list22")
##########################################################################################
@app.route('/swift/containers/<containerName>/objects/<path:filename>/details', methods=['GET'])
def getMetaDataInfo(containerName,filename):
log.debug("Get metadata information")
log.debug(containerName)
log.debug(filename)
metaInfo = swift.getObjMetaData(containerName,filename)
metadata = json.dumps(metaInfo,sort_keys=True)
return Response(metadata, mimetype='application/json')
##########################################################################################
"""
Route that will process the file upload
"""
@app.route('/upload', methods=['POST'])
def upload():
# Get the name of the uploaded file
log.debug("inside the upload part")
inputFile = request.files['objectName']
# Check if the file is one of the allowed types/extensions
if inputFile:
log.debug("accepted file upload")
# Make the filename safe, remove unsupported chars
inputFileName = secure_filename(inputFile.filename)
log.debug(inputFileName)
inputFileContent = inputFile.read()
print("hjdgkjdgffhgkdsjh",inputFileContent)
log.debug(inputFileContent)
folderName = request.form['containerNameUp']
log.debug(folderName)
retentime = request.form['RetentionPeriod']
log.debug(retentime)
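# Convert the submitted retention date (YYYY-MM-DD) to an epoch timestamp in
# seconds; when no retention period was given, keep the empty string as-is.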
if retentime:
convertretentime = datetime.datetime.strptime(retentime,"%Y-%m-%d").strftime("%d-%m-%Y")
log.debug(convertretentime)
retentimestamp = int(time.mktime(datetime.datetime.strptime(convertretentime, "%d-%m-%Y").timetuple()))
log.debug(retentimestamp)
else:
retentimestamp = retentime
h = dict()
h["X-Object-Meta-RetentionTime"] = retentimestamp
h["X-Object-Meta-OwnerName"] = request.form['OwnerName']
swift.createObject(inputFileName,inputFileContent,folderName,h,chunk_size=10)
encodedoutputFileContent = swift.retrieveObject(folderName,inputFileName)
return Response(None)
##########################################################################################
"""
download obj route
"""
@app.route('/swift/containers/<containerName>/objects/<path:filename>', methods=['GET'])
def downloadObject(containerName, filename):
log.debug("downloadObject: %s - %s" % (containerName, filename))
encodedOutputFile = swift.getObject(containerName,filename,resp_chunk_size=10)
return Response(encodedOutputFile, mimetype='application/octet-stream')
##########################################################################################
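# Retention helpers: calcTimeDifference returns the number of seconds until the
# given epoch timestamp (negative if it lies in the past, False if the value is
# not numeric); isRetentionPeriodExpired uses it to decide whether an object's
# retention period has passed and the object may therefore be deleted.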
def calcTimeDifference(timestamp):
try:
return int(timestamp) - int(time.time())
except ValueError:
return False
def isRetentionPeriodExpired(timestamp):
diff = calcTimeDifference(timestamp)
if diff is not False:
return diff <= 0
return False
"""
delete obj route
"""
@app.route('/swift/containers/<containerName>/objects/<path:filename>', methods=['DELETE'])
def deleteObject(containerName,filename):
log.debug("deleteObject: %s - %s" % (containerName, filename))
json1 = json.dumps(swift.getObjMetaData(containerName,filename),ensure_ascii=False)
log.debug(json1)
new_dict = json.loads(json1)
retentimestamp = new_dict['x-object-meta-retentiontime']
if (isRetentionPeriodExpired(retentimestamp) or not retentimestamp):
swift.delObject(containerName,filename)
responsemsg={}
responsemsg['deletestatus'] = "done"
return Response(json.dumps(responsemsg),mimetype='application/json')
else:
log.debug("You are not allowed to delete the file!")
log.debug( "The retentiondate is: " +
datetime.datetime.fromtimestamp(
int(retentimestamp)
).strftime('%m-%d-%Y')
)
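# Break the remaining retention time down into weeks, days, hours, minutes and
# seconds for the response message below.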
minutes, seconds = divmod(calcTimeDifference(retentimestamp), 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
log.debug("The number of days left for deletion: " + str(days))
log.debug("You should wait for "+ str(weeks)+" weeks and "+ str(days)+" days and "+str(hours)+" hours and "+str(minutes)+" minutes and"+str(seconds)+" seconds to delete this file!!!")
responsemsg={}
responsemsg['deletestatus'] = "failed"
responsemsg['retention'] = datetime.datetime.fromtimestamp(int(retentimestamp)).strftime('%m-%d-%Y')
responsemsg['seconds'] = seconds
responsemsg['minutes'] = minutes
responsemsg['hours'] = hours
responsemsg['days'] = days
responsemsg['weeks'] = weeks
return Response(json.dumps(responsemsg),mimetype='application/json')
#################################Scheduler#########################################################
@app.route('/swift/containers/<containerName>/CheckOldFiles/', methods=['GET'])
def CheckOldFiles(containerName, doDelete=False):
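# Collect the names of all objects in the container whose retention period has
# expired; when doDelete is True they are removed in a single bulk call.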
log.debug(containerName)
files = swift.fileList(containerName)
oldFiles={}
filenames = list()
for file in files:
log.debug('{0}\t{1}\t{2}'.format(file['name'], file['bytes'], file['last_modified']))
fileMetaDict = swift.getObjMetaData(containerName,file['name'])
log.debug(fileMetaDict)
log.debug(file['name'])
log.debug(fileMetaDict['x-object-meta-retentiontime'])
retentimestamp = fileMetaDict['x-object-meta-retentiontime']
if (isRetentionPeriodExpired(retentimestamp)):
filenames.append(file['name'])
log.debug(filenames)
responseObj = {"list" : filenames}
if (doDelete):
swift.delObjects(containerName,filenames)
return Response(json.dumps(responseObj),mimetype='application/json')
# TODO what should we do about the files which have no retention date
###################################################################################################
@app.route('/swift/containers/<containerName>/DeleteOldFiles/', methods=['DELETE'])
def DeleteOldFiles(containerName):
return CheckOldFiles(containerName, doDelete=True)
###################################################################################################
#Main Function
if __name__ == '__main__':
appPort = os.getenv('VCAP_APP_PORT', '5000')
appHost = os.getenv('VCAP_APP_HOST', '127.0.0.1')
app.run(
host=appHost,
port=int(appPort),
debug=True
)
|
|
from __future__ import unicode_literals
import os
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.exceptions import ValidationError
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.scmtools.core import HEAD
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.scmtools.signals import (checked_file_exists,
checking_file_exists,
fetched_file, fetching_file)
from reviewboard.testing.testcase import TestCase
class RepositoryTests(SpyAgency, TestCase):
"""Unit tests for Repository operations."""
fixtures = ['test_scmtools']
def setUp(self):
super(RepositoryTests, self).setUp()
self.local_repo_path = os.path.join(os.path.dirname(__file__),
'..', 'testdata', 'git_repo')
self.repository = Repository.objects.create(
name='Git test repo',
path=self.local_repo_path,
tool=Tool.objects.get(name='Git'))
def test_archive(self):
"""Testing Repository.archive"""
repository1 = self.repository
repository1.archive()
self.assertTrue(repository1.name.startswith('ar:Git test repo:'))
self.assertTrue(repository1.archived)
self.assertFalse(repository1.public)
self.assertIsNotNone(repository1.archived_timestamp)
repository2 = Repository.objects.get(pk=repository1.pk)
self.assertEqual(repository2.name,
repository1.name)
self.assertEqual(repository2.archived,
repository1.archived)
self.assertEqual(repository2.public,
repository1.public)
self.assertEqual(repository2.archived_timestamp,
repository1.archived_timestamp)
def test_archive_no_save(self):
"""Testing Repository.archive with save=False"""
repository1 = self.repository
repository1.archive(save=False)
self.assertTrue(repository1.name.startswith('ar:Git test repo:'))
self.assertTrue(repository1.archived)
self.assertFalse(repository1.public)
self.assertIsNotNone(repository1.archived_timestamp)
repository2 = Repository.objects.get(pk=repository1.pk)
self.assertNotEqual(repository2.name,
repository1.name)
self.assertNotEqual(repository2.archived,
repository1.archived)
self.assertNotEqual(repository2.public,
repository1.public)
self.assertNotEqual(repository2.archived_timestamp,
repository1.archived_timestamp)
def test_clean_without_conflict(self):
"""Testing Repository.clean without name/path conflicts"""
with self.assertNumQueries(1):
self.repository.clean()
def test_clean_with_name_conflict(self):
"""Testing Repository.clean with name conflict"""
repository = Repository(name=self.repository.name,
path='path/to/repo.git',
tool=self.repository.tool)
with self.assertRaises(ValidationError) as ctx:
with self.assertNumQueries(1):
repository.clean()
self.assertEqual(ctx.exception.message_dict, {
'name': ['A repository with this name already exists'],
})
def test_clean_with_path_conflict(self):
"""Testing Repository.clean with path conflict"""
repository = Repository(name='New test repo',
path=self.repository.path,
tool=self.repository.tool)
with self.assertRaises(ValidationError) as ctx:
with self.assertNumQueries(1):
repository.clean()
self.assertEqual(ctx.exception.message_dict, {
'path': ['A repository with this path already exists'],
})
def test_clean_with_name_and_path_conflict(self):
"""Testing Repository.clean with name and path conflict"""
repository = Repository(name=self.repository.name,
path=self.repository.path,
tool=self.repository.tool)
with self.assertRaises(ValidationError) as ctx:
with self.assertNumQueries(1):
repository.clean()
self.assertEqual(ctx.exception.message_dict, {
'name': ['A repository with this name already exists'],
'path': ['A repository with this path already exists'],
})
def test_clean_with_path_conflict_with_archived(self):
"""Testing Repository.clean with archived repositories ignored for
path conflict
"""
orig_repository = self.repository
orig_repository.archive()
repository = Repository(name='New test repo',
path=orig_repository.path,
tool=orig_repository.tool)
with self.assertNumQueries(1):
repository.clean()
def test_get_file_caching(self):
"""Testing Repository.get_file caches result"""
path = 'readme'
revision = 'e965047'
repository = self.repository
scmtool_cls = repository.scmtool_class
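# Stub out SCMTool.get_file so the test never touches the real repository; the
# second get_file call below must then be served from the cache, which is what
# the single recorded spy call asserts.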
self.spy_on(scmtool_cls.get_file,
call_fake=lambda *args, **kwargs: b'file data',
owner=scmtool_cls)
data1 = repository.get_file(path, revision)
data2 = repository.get_file(path, revision)
self.assertIsInstance(data1, bytes)
self.assertIsInstance(data2, bytes)
self.assertEqual(data1, b'file data')
self.assertEqual(data1, data2)
self.assertEqual(len(scmtool_cls.get_file.calls), 1)
self.assertSpyCalledWith(scmtool_cls.get_file,
path,
revision=revision)
def test_get_file_signals(self):
"""Testing Repository.get_file emits signals"""
def on_fetching_file(sender, path, revision, request, **kwargs):
found_signals.append(('fetching_file', path, revision, request))
def on_fetched_file(sender, path, revision, request, **kwargs):
found_signals.append(('fetched_file', path, revision, request))
found_signals = []
fetching_file.connect(on_fetching_file, sender=self.repository)
fetched_file.connect(on_fetched_file, sender=self.repository)
path = 'readme'
revision = 'e965047'
request = {}
self.repository.get_file(path, revision, request=request)
self.assertEqual(len(found_signals), 2)
self.assertEqual(found_signals[0],
('fetching_file', path, revision, request))
self.assertEqual(found_signals[1],
('fetched_file', path, revision, request))
def test_get_file_exists_caching_when_exists(self):
"""Testing Repository.get_file_exists caches result when exists"""
path = 'readme'
revision = 'e965047'
repository = self.repository
scmtool_cls = repository.scmtool_class
self.spy_on(scmtool_cls.file_exists,
call_fake=lambda *args, **kwargs: True,
owner=scmtool_cls)
self.assertTrue(repository.get_file_exists(path, revision))
self.assertTrue(repository.get_file_exists(path, revision))
self.assertEqual(len(scmtool_cls.file_exists.calls), 1)
self.assertSpyCalledWith(scmtool_cls.file_exists,
path,
revision=revision)
def test_get_file_exists_caching_when_not_exists(self):
"""Testing Repository.get_file_exists doesn't cache result when the
file does not exist
"""
path = 'readme'
revision = '12345'
repository = self.repository
scmtool_cls = repository.scmtool_class
self.spy_on(scmtool_cls.file_exists,
call_fake=lambda *args, **kwargs: False,
owner=scmtool_cls)
self.assertFalse(repository.get_file_exists(path, revision))
self.assertFalse(repository.get_file_exists(path, revision))
self.assertEqual(len(scmtool_cls.file_exists.calls), 2)
self.assertSpyCalledWith(scmtool_cls.file_exists,
path,
revision=revision)
def test_get_file_exists_caching_with_fetched_file(self):
"""Testing Repository.get_file_exists uses get_file's cached result"""
path = 'readme'
revision = 'e965047'
repository = self.repository
scmtool_cls = repository.scmtool_class
self.spy_on(scmtool_cls.get_file,
call_fake=lambda *args, **kwargs: b'file data',
owner=scmtool_cls)
self.spy_on(scmtool_cls.file_exists,
call_fake=lambda *args, **kwargs: True,
owner=scmtool_cls)
repository.get_file(path, revision)
exists1 = repository.get_file_exists(path, revision)
exists2 = repository.get_file_exists(path, revision)
self.assertTrue(exists1)
self.assertTrue(exists2)
self.assertEqual(len(scmtool_cls.get_file.calls), 1)
self.assertEqual(len(scmtool_cls.file_exists.calls), 0)
def test_get_file_exists_signals(self):
"""Testing Repository.get_file_exists emits signals"""
def on_checking(sender, path, revision, request, **kwargs):
found_signals.append(('checking_file_exists', path,
revision, request))
def on_checked(sender, path, revision, request, **kwargs):
found_signals.append(('checked_file_exists', path,
revision, request))
repository = self.repository
found_signals = []
checking_file_exists.connect(on_checking, sender=repository)
checked_file_exists.connect(on_checked, sender=repository)
path = 'readme'
revision = 'e965047'
request = {}
repository.get_file_exists(path, revision, request=request)
self.assertEqual(len(found_signals), 2)
self.assertEqual(found_signals[0],
('checking_file_exists', path, revision, request))
self.assertEqual(found_signals[1],
('checked_file_exists', path, revision, request))
def test_repository_name_with_255_characters(self):
"""Testing Repository.name with 255 characters"""
repository = self.create_repository(name='t' * 255)
self.assertEqual(len(repository.name), 255)
def test_is_accessible_by_with_public(self):
"""Testing Repository.is_accessible_by with public repository"""
user = self.create_user()
repository = self.create_repository()
self.assertTrue(repository.is_accessible_by(user))
self.assertTrue(repository.is_accessible_by(AnonymousUser()))
def test_is_accessible_by_with_public_and_hidden(self):
"""Testing Repository.is_accessible_by with public hidden repository"""
user = self.create_user()
repository = self.create_repository(visible=False)
self.assertTrue(repository.is_accessible_by(user))
self.assertTrue(repository.is_accessible_by(AnonymousUser()))
def test_is_accessible_by_with_private_and_not_member(self):
"""Testing Repository.is_accessible_by with private repository and
user not a member
"""
user = self.create_user()
repository = self.create_repository(public=False)
self.assertFalse(repository.is_accessible_by(user))
self.assertFalse(repository.is_accessible_by(AnonymousUser()))
def test_is_accessible_by_with_private_and_member(self):
"""Testing Repository.is_accessible_by with private repository and
user is a member
"""
user = self.create_user()
repository = self.create_repository(public=False)
repository.users.add(user)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_and_member_by_group(self):
"""Testing Repository.is_accessible_by with private repository and
user is a member by group
"""
user = self.create_user()
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False)
repository.review_groups.add(group)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_and_superuser(self):
"""Testing Repository.is_accessible_by with private repository and
user is a superuser
"""
user = self.create_user(is_superuser=True)
repository = self.create_repository(public=False)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_not_member(self):
"""Testing Repository.is_accessible_by with private hidden
repository and user not a member
"""
user = self.create_user()
repository = self.create_repository(public=False,
visible=False)
self.assertFalse(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_and_member(self):
"""Testing Repository.is_accessible_by with private hidden
repository and user is a member
"""
user = self.create_user()
repository = self.create_repository(public=False,
visible=False)
repository.users.add(user)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_and_member_by_group(self):
"""Testing Repository.is_accessible_by with private hidden
repository and user is a member
"""
user = self.create_user()
group = self.create_review_group(invite_only=True)
group.users.add(user)
repository = self.create_repository(public=False,
visible=False)
repository.review_groups.add(group)
self.assertTrue(repository.is_accessible_by(user))
def test_is_accessible_by_with_private_hidden_and_superuser(self):
"""Testing Repository.is_accessible_by with private hidden
repository and superuser
"""
user = self.create_user(is_superuser=True)
repository = self.create_repository(public=False,
visible=False)
self.assertTrue(repository.is_accessible_by(user))
@add_fixtures(['test_users', 'test_site'])
def test_is_accessible_by_with_local_site_accessible(self):
"""Testing Repository.is_accessible_by with Local Site accessible by
user
"""
user = self.create_user()
repository = self.create_repository(with_local_site=True)
repository.local_site.users.add(user)
self.assertTrue(repository.is_accessible_by(user))
@add_fixtures(['test_users', 'test_site'])
def test_is_accessible_by_with_local_site_not_accessible(self):
"""Testing Repository.is_accessible_by with Local Site not accessible
by user
"""
user = self.create_user()
repository = self.create_repository(with_local_site=True)
self.assertFalse(repository.is_accessible_by(user))
self.assertFalse(repository.is_accessible_by(AnonymousUser()))
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import Connmon
import datetime
import Elastic
import glob
import Grafana
import logging
import os
import re
import shutil
import time
import Tools
import WorkloadBase
import json
class Rally(WorkloadBase.WorkloadBase):
def __init__(self, config, hosts=None):
self.logger = logging.getLogger('browbeat.Rally')
self.config = config
self.tools = Tools.Tools(self.config)
self.connmon = Connmon.Connmon(self.config)
self.grafana = Grafana.Grafana(self.config)
self.elastic = Elastic.Elastic(
self.config, self.__class__.__name__.lower())
self.error_count = 0
self.pass_count = 0
self.test_count = 0
self.scenario_count = 0
def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
self.logger.debug("--------------------------------")
self.logger.debug("task_file: {}".format(task_file))
self.logger.debug("scenario_args: {}".format(scenario_args))
self.logger.debug("result_dir: {}".format(result_dir))
self.logger.debug("test_name: {}".format(test_name))
self.logger.debug("--------------------------------")
from_ts = int(time.time() * 1000)
if 'sleep_before' in self.config['rally']:
time.sleep(self.config['rally']['sleep_before'])
task_args = str(scenario_args).replace("'", "\"")
plugins = []
if "plugins" in self.config['rally']:
if len(self.config['rally']['plugins']) > 0:
for plugin in self.config['rally']['plugins']:
for name in plugin:
plugins.append(plugin[name])
plugin_string = ""
if len(plugins) > 0:
plugin_string = "--plugin-paths {}".format(",".join(plugins))
cmd = "source {}; ".format(self.config['rally']['venv'])
cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
plugin_string, task_file, task_args, test_name)
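# For illustration only (paths and values below are hypothetical), the command
# assembled above ends up looking roughly like:
#   source /path/to/rally/venv; rally --plugin-paths p1,p2 task start \
#       scenario.yml --task-args '{"concurrency": 8}' 2>&1 | tee test-name.log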
from_time = time.time()
self.tools.run_cmd(cmd)
to_time = time.time()
if 'sleep_after' in self.config['rally']:
time.sleep(self.config['rally']['sleep_after'])
to_ts = int(time.time() * 1000)
self.grafana.create_grafana_urls({'from_ts': from_ts, 'to_ts': to_ts})
self.grafana.print_dashboard_url(test_name)
self.grafana.log_snapshot_playbook_cmd(
from_ts, to_ts, result_dir, test_name)
self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
return (from_time, to_time)
def update_tests(self):
self.test_count += 1
def update_pass_tests(self):
self.pass_count += 1
def update_fail_tests(self):
self.error_count += 1
def update_scenarios(self):
self.scenario_count += 1
def get_task_id(self, test_name):
cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
test_name)
return self.tools.run_cmd(cmd)
def _get_details(self):
self.logger.info(
"Current number of Rally scenarios executed:{}".format(
self.scenario_count))
self.logger.info(
"Current number of Rally tests executed:{}".format(self.test_count))
self.logger.info(
"Current number of Rally tests passed:{}".format(self.pass_count))
self.logger.info(
"Current number of Rally test failures:{}".format(self.error_count))
def gen_scenario_html(self, task_ids, test_name):
all_task_ids = ' '.join(task_ids)
cmd = "source {}; ".format(self.config['rally']['venv'])
cmd += "rally task report --task {} --out {}.html".format(
all_task_ids, test_name)
return self.tools.run_cmd(cmd)
def gen_scenario_json(self, task_id):
cmd = "source {}; ".format(self.config['rally']['venv'])
cmd += "rally task results {}".format(task_id)
return self.tools.run_cmd(cmd)
def gen_scenario_json_file(self, task_id, test_name):
cmd = "source {}; ".format(self.config['rally']['venv'])
cmd += "rally task results {} > {}.json".format(task_id, test_name)
return self.tools.run_cmd(cmd)
def rally_metadata(self, result, meta):
result['rally_metadata'] = meta
return result
def file_to_json(self, filename, push_to_es=False):
self.logger.info("Loading rally JSON file {} JSON".format(filename))
rally_json = self.elastic.load_json_file(filename)
errors, results = self.json_parse(rally_json)
for error in errors:
error_result = self.elastic.combine_metadata(error)
with open("{}/{}-error_index-es.json".format(os.path.dirname(filename),
os.path.basename(filename)),
'w+') as error_file:
json.dump(error_result, error_file)
for result in results:
result_doc = self.elastic.combine_metadata(result)
with open("{}/{}-result_index-es.json".format(os.path.dirname(filename),
os.path.splitext(
os.path.basename(filename))[0]),
'w+') as result_file:
json.dump(result_doc, result_file)
return errors, results
def json_parse(self, json_doc, metadata={}):
"""Function to extract data out of a json document
Args:
json_doc (json): json document to parse
metadata (dict): dict containing run-specific metadata, e.g. the Rally UUID.
Returns:
errors (list) : errors contained within the json_doc
results (list) : results contained within the json_doc
"""
rally_data = {}
errors = []
results = []
if len(json_doc) < 1:
self.logger.error("Issue with JSON document")
return errors, results
es_ts = datetime.datetime.utcnow()
for metrics in json_doc[0]['result']:
for workload in metrics:
if type(metrics[workload]) is dict:
for value in metrics[workload]:
if not type(metrics[workload][value]) is list:
if value not in rally_data:
rally_data[value] = []
rally_data[value].append(metrics[workload][value])
if len(metrics['error']) > 0:
iteration = 1
workload_name = value
if value.find('(') != -1:
iteration = re.findall(r'\d+', value)[0]
workload_name = value.split('(')[0]
error = {'action': workload_name.strip(),
'iteration': iteration,
'error_type': metrics['error'][0],
'error_msg': metrics['error'][1],
'timestamp': str(es_ts).replace(" ", "T"),
'rally_setup': json_doc[0]['key']
}
if len(metadata) > 0:
error.update(metadata)
errors.append(error)
for workload in rally_data:
if not type(rally_data[workload]) is dict:
iteration = 1
workload_name = workload
if workload.find('(') != -1:
iteration = re.findall(r'\d+', workload)[0]
workload_name = workload.split('(')[0]
rally_stats = {'action': workload_name.strip(),
'iteration': iteration,
'timestamp': str(es_ts).replace(" ", "T"),
'grafana_url': [self.grafana.grafana_urls()],
'rally_setup': json_doc[0]['key'],
'raw': rally_data[workload]}
if len(metadata) > 0:
rally_stats.update(metadata)
results.append(rally_stats)
return errors, results
def json_result(self, task_id, scenario_name, run, test_name, result_dir):
success = True
self.logger.info("Loading Task_ID {} JSON".format(task_id))
rally_json = self.elastic.load_json(self.gen_scenario_json(task_id))
errors, results = self.json_parse(rally_json, {'scenario': scenario_name,
'browbeat_rerun': run,
'result': task_id})
for error in errors:
error_result = self.elastic.combine_metadata(error)
status = self.elastic.index_result(error_result, test_name, result_dir,
'rally', 'error')
if not status:
success = False
for result in results:
result = self.elastic.combine_metadata(result)
status = self.elastic.index_result(
result, test_name, result_dir, 'rally')
if not status:
success = False
return success
def start_workloads(self):
"""Iterates through all rally scenarios in browbeat yaml config file"""
results = collections.OrderedDict()
self.logger.info("Starting Rally workloads")
es_ts = datetime.datetime.utcnow()
dir_ts = es_ts.strftime("%Y%m%d-%H%M%S")
self.logger.debug("Time Stamp (Prefix): {}".format(dir_ts))
benchmarks = self.config.get('rally')['benchmarks']
if (benchmarks is not None and len(benchmarks) > 0):
for benchmark in benchmarks:
if benchmark['enabled']:
self.logger.info("Benchmark: {}".format(benchmark['name']))
scenarios = benchmark['scenarios']
def_concurrencies = benchmark['concurrency']
def_times = benchmark['times']
self.logger.debug(
"Default Concurrencies: {}".format(def_concurrencies))
self.logger.debug("Default Times: {}".format(def_times))
for scenario in scenarios:
if scenario['enabled']:
self.update_scenarios()
self.update_total_scenarios()
scenario_name = scenario['name']
scenario_file = scenario['file']
self.logger.info(
"Running Scenario: {}".format(scenario_name))
self.logger.debug(
"Scenario File: {}".format(scenario_file))
del scenario['enabled']
del scenario['file']
del scenario['name']
if len(scenario) > 0:
self.logger.debug(
"Overriding Scenario Args: {}".format(scenario))
result_dir = self.tools.create_results_dir(
self.config['browbeat'][
'results'], dir_ts, benchmark['name'],
scenario_name)
self.logger.debug(
"Created result directory: {}".format(result_dir))
workload = self.__class__.__name__
self.workload_logger(result_dir, workload)
# Override concurrency/times
if 'concurrency' in scenario:
concurrencies = scenario['concurrency']
del scenario['concurrency']
else:
concurrencies = def_concurrencies
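# Count how often each concurrency value appears so that repeated values can be
# given distinct test names further down.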
concurrency_count_dict = collections.Counter(
concurrencies)
if 'times' not in scenario:
scenario['times'] = def_times
for concurrency in concurrencies:
scenario['concurrency'] = concurrency
for run in range(self.config['browbeat']['rerun']):
if run not in results:
results[run] = []
self.update_tests()
self.update_total_tests()
if concurrency_count_dict[concurrency] == 1:
test_name = "{}-browbeat-{}-{}-iteration-{}".format(
dir_ts, scenario_name, concurrency, run)
else:
test_name = "{}-browbeat-{}-{}-{}-iteration-{}".format(
dir_ts, scenario_name, concurrency,
concurrency_count_dict[concurrency], run)
self.logger.debug("Duplicate concurrency {} found,"
" setting test name"
" to {}".format(concurrency, test_name))
concurrency_count_dict[
concurrency] -= 1
if not result_dir:
self.logger.error(
"Failed to create result directory")
exit(1)
# Start connmon before rally
if self.config['connmon']['enabled']:
self.connmon.start_connmon()
from_time, to_time = self.run_scenario(
scenario_file, scenario, result_dir, test_name,
benchmark['name'])
# Stop connmon at end of rally task
if self.config['connmon']['enabled']:
self.connmon.stop_connmon()
try:
self.connmon.move_connmon_results(
result_dir, test_name)
except Exception:
self.logger.error(
"Connmon Result data missing, \
Connmon never started")
return False
self.connmon.connmon_graphs(
result_dir, test_name)
new_test_name = test_name.split('-')
new_test_name = new_test_name[3:]
new_test_name = "-".join(new_test_name)
# Find task id (if task succeeded in
# running)
task_id = self.get_task_id(test_name)
if task_id:
self.logger.info(
"Generating Rally HTML for task_id : {}".
format(task_id))
self.gen_scenario_html(
[task_id], test_name)
self.gen_scenario_json_file(
task_id, test_name)
results[run].append(task_id)
self.update_pass_tests()
self.update_total_pass_tests()
if self.config['elasticsearch']['enabled']:
# Start indexing
index_status = self.json_result(
task_id, scenario_name, run, test_name, result_dir)
self.get_time_dict(to_time, from_time,
benchmark[
'name'], new_test_name,
workload, "pass", index_status)
else:
self.get_time_dict(to_time, from_time, benchmark[
'name'], new_test_name,
workload, "pass", )
else:
self.logger.error(
"Cannot find task_id")
self.update_fail_tests()
self.update_total_fail_tests()
self.get_time_dict(
to_time, from_time, benchmark[
'name'], new_test_name,
workload, "fail")
for data in glob.glob("./{}*".format(test_name)):
shutil.move(data, result_dir)
self._get_details()
else:
self.logger.info(
"Skipping {} scenario enabled: false".format(scenario['name']))
else:
self.logger.info(
"Skipping {} benchmarks enabled: false".format(benchmark['name']))
self.logger.debug("Creating Combined Rally Reports")
for run in results:
combined_html_name = 'all-rally-run-{}'.format(run)
self.gen_scenario_html(results[run], combined_html_name)
if os.path.isfile('{}.html'.format(combined_html_name)):
shutil.move('{}.html'.format(combined_html_name),
'{}/{}'.format(self.config['browbeat']['results'], dir_ts))
else:
self.logger.error("Config file contains no rally benchmarks.")
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for Psi3 output files."""
import numpy
from cclib.parser import logfileparser
from cclib.parser import utils
class Psi3(logfileparser.Logfile):
"""A Psi3 log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(Psi3, self).__init__(logname="Psi3", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "Psi3 log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'Psi3("%s")' % (self.filename)
def normalisesym(self, label):
"""Psi3 does not require normalizing symmetry labels."""
return label
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if "Version" in line:
self.metadata["package_version"] = ''.join(line.split()[1:]).lower()
# Psi3 prints the coordinates in several configurations, and we will parse
# the canonical coordinate system in Angstroms as the first coordinate set,
# although it is actually somewhere later in the input, after basis set, etc.
# We can also get or verify the number of atoms and atomic numbers from this block.
if line.strip() == "-Geometry in the canonical coordinate system (Angstrom):":
self.skip_lines(inputfile, ['header', 'd'])
coords = []
numbers = []
line = next(inputfile)
while line.strip():
tokens = line.split()
element = tokens[0]
numbers.append(self.table.number[element])
x = float(tokens[1])
y = float(tokens[2])
z = float(tokens[3])
coords.append([x, y, z])
line = next(inputfile)
self.set_attribute('natom', len(coords))
self.set_attribute('atomnos', numbers)
if not hasattr(self, 'atomcoords'):
self.atomcoords = []
self.atomcoords.append(coords)
if line.strip() == '-SYMMETRY INFORMATION:':
line = next(inputfile)
while line.strip():
if "Number of atoms" in line:
self.set_attribute('natom', int(line.split()[-1]))
line = next(inputfile)
if line.strip() == "-BASIS SET INFORMATION:":
line = next(inputfile)
while line.strip():
if "Number of SO" in line:
self.set_attribute('nbasis', int(line.split()[-1]))
line = next(inputfile)
# In Psi3, the section with the contraction scheme can be used to infer atombasis.
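# Each line ends in a shell string such as "3s 2p 1d" (an illustrative value);
# rewriting it as "3*1+2*3+1*6" and evaluating it yields the number of basis
# functions contributed by that atom.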
if line.strip() == "-Contraction Scheme:":
self.skip_lines(inputfile, ['header', 'd'])
indices = []
line = next(inputfile)
while line.strip():
shells = line.split('//')[-1]
expression = shells.strip().replace(' ', '+')
expression = expression.replace('s', '*1')
expression = expression.replace('p', '*3')
expression = expression.replace('d', '*6')
nfuncs = eval(expression)
if len(indices) == 0:
indices.append(range(nfuncs))
else:
start = indices[-1][-1] + 1
indices.append(range(start, start+nfuncs))
line = next(inputfile)
self.set_attribute('atombasis', indices)
if line.strip() == "CINTS: An integrals program written in C":
self.skip_lines(inputfile, ['authors', 'd', 'b', 'b'])
line = next(inputfile)
assert line.strip() == "-OPTIONS:"
while line.strip():
line = next(inputfile)
line = next(inputfile)
assert line.strip() == "-CALCULATION CONSTANTS:"
while line.strip():
if "Number of atoms" in line:
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
if "Number of symmetry orbitals" in line:
nbasis = int(line.split()[-1])
self.set_attribute('nbasis', nbasis)
line = next(inputfile)
if line.strip() == "CSCF3.0: An SCF program written in C":
self.skip_lines(inputfile, ['b', 'authors', 'b', 'd', 'b',
'mult', 'mult_comment', 'b'])
line = next(inputfile)
while line.strip():
if line.split()[0] == "multiplicity":
mult = int(line.split()[-1])
self.set_attribute('mult', mult)
if line.split()[0] == "charge":
charge = int(line.split()[-1])
self.set_attribute('charge', charge)
if line.split()[0] == "convergence":
conv = float(line.split()[-1])
if line.split()[0] == "reference":
self.reference = line.split()[-1]
line = next(inputfile)
if not hasattr(self, 'scftargets'):
self.scftargets = []
self.scftargets.append([conv])
# ==> Iterations <==
# Psi3 converges just the density elements, although it reports in the iterations
# changes in the energy as well as the DIIS error.
psi3_iterations_header = "iter total energy delta E delta P diiser"
if line.strip() == psi3_iterations_header:
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append([])
line = next(inputfile)
while line.strip():
ddensity = float(line.split()[-2])
self.scfvalues[-1].append([ddensity])
line = next(inputfile)
# This section, from which we parse molecular orbital symmetries and
# orbital energies, is quite similar for both Psi3 and Psi4, and in fact
# the format for orbitals is the same, although the headers and spacers
# are a bit different. Let's try to get both parsed with one code block.
#
# Here is what the block looks like for Psi4:
#
# Orbital Energies (a.u.)
# -----------------------
#
# Doubly Occupied:
#
# 1Bu -11.040586 1Ag -11.040524 2Bu -11.031589
# 2Ag -11.031589 3Bu -11.028950 3Ag -11.028820
# (...)
# 15Ag -0.415620 1Bg -0.376962 2Au -0.315126
# 2Bg -0.278361 3Bg -0.222189
#
# Virtual:
#
# 3Au 0.198995 4Au 0.268517 4Bg 0.308826
# 5Au 0.397078 5Bg 0.521759 16Ag 0.565017
# (...)
# 24Ag 0.990287 24Bu 1.027266 25Ag 1.107702
# 25Bu 1.124938
#
# Only the letter case of the trigger string differs between Psi3 and Psi4.
if "orbital energies (a.u.)" in line.lower():
self.moenergies = [[]]
self.mosyms = [[]]
self.skip_line(inputfile, 'blank')
occupied = next(inputfile)
if self.reference[0:2] == 'RO' or self.reference[0:1] == 'R':
assert 'doubly occupied' in occupied.lower()
elif self.reference[0:1] == 'U':
assert 'alpha occupied' in occupied.lower()
# Parse the occupied MO symmetries and energies.
self._parse_mosyms_moenergies(inputfile, 0)
# The last orbital energy here represents the HOMO.
self.homos = [len(self.moenergies[0])-1]
# For a restricted open-shell calculation, this is the
# beta HOMO, and we assume the singly-occupied orbitals
# are all alpha, which are handled next.
if self.reference[0:2] == 'RO':
self.homos.append(self.homos[0])
self.skip_line(inputfile, 'blank')
unoccupied = next(inputfile)
if self.reference[0:2] == 'RO':
assert unoccupied.strip() == 'Singly Occupied:'
elif self.reference[0:1] == 'R':
assert unoccupied.strip() == 'Unoccupied orbitals'
elif self.reference[0:1] == 'U':
assert unoccupied.strip() == 'Alpha Virtual:'
# Parse the unoccupied MO symmetries and energies.
self._parse_mosyms_moenergies(inputfile, 0)
# Here is where we handle the Beta or Singly occupied orbitals.
if self.reference[0:1] == 'U':
self.mosyms.append([])
self.moenergies.append([])
line = next(inputfile)
assert line.strip() == 'Beta Occupied:'
self.skip_line(inputfile, 'blank')
self._parse_mosyms_moenergies(inputfile, 1)
self.homos.append(len(self.moenergies[1])-1)
line = next(inputfile)
assert line.strip() == 'Beta Virtual:'
self.skip_line(inputfile, 'blank')
self._parse_mosyms_moenergies(inputfile, 1)
elif self.reference[0:2] == 'RO':
line = next(inputfile)
assert line.strip() == 'Virtual:'
self.skip_line(inputfile, 'blank')
self._parse_mosyms_moenergies(inputfile, 0)
# Both Psi3 and Psi4 print the final SCF energy right after
# the orbital energies, but the label is different. Psi4 also
# does DFT, and the label is also different in that case.
if "* SCF total energy" in line:
e = float(line.split()[-1])
if not hasattr(self, 'scfenergies'):
self.scfenergies = []
self.scfenergies.append(utils.convertor(e, 'hartree', 'eV'))
# We can also get some higher moments in Psi3, although here the dipole is not printed
# separately and the order is not lexicographical. However, the numbers seem
# kind of strange -- the quadrupole seems to be traceless, although I'm not sure
# whether the standard transformation has been used. So, until we know what kind
# of moment these are and how to make them raw again, we will only parse the dipole.
#
# --------------------------------------------------------------
# *** Electric multipole moments ***
# --------------------------------------------------------------
#
# CAUTION : The system has non-vanishing dipole moment, therefore
# quadrupole and higher moments depend on the reference point.
#
# -Coordinates of the reference point (a.u.) :
# x y z
# -------------------- -------------------- --------------------
# 0.0000000000 0.0000000000 0.0000000000
#
# -Electric dipole moment (expectation values) :
#
# mu(X) = -0.00000 D = -1.26132433e-43 C*m = -0.00000000 a.u.
# mu(Y) = 0.00000 D = 3.97987832e-44 C*m = 0.00000000 a.u.
# mu(Z) = 0.00000 D = 0.00000000e+00 C*m = 0.00000000 a.u.
# |mu| = 0.00000 D = 1.32262368e-43 C*m = 0.00000000 a.u.
#
# -Components of electric quadrupole moment (expectation values) (a.u.) :
#
# Q(XX) = 10.62340220 Q(YY) = 1.11816843 Q(ZZ) = -11.74157063
# Q(XY) = 3.64633112 Q(XZ) = 0.00000000 Q(YZ) = 0.00000000
#
if line.strip() == "*** Electric multipole moments ***":
self.skip_lines(inputfile, ['d', 'b', 'caution1', 'caution2', 'b'])
coordinates = next(inputfile)
assert coordinates.split()[-2] == "(a.u.)"
self.skip_lines(inputfile, ['xyz', 'd'])
line = next(inputfile)
self.origin = numpy.array([float(x) for x in line.split()])
self.origin = utils.convertor(self.origin, 'bohr', 'Angstrom')
self.skip_line(inputfile, "blank")
line = next(inputfile)
assert "Electric dipole moment" in line
self.skip_line(inputfile, "blank")
# Make sure to use the column that has the value in Debyes.
dipole = []
for i in range(3):
line = next(inputfile)
dipole.append(float(line.split()[2]))
if not hasattr(self, 'moments'):
self.moments = [self.origin, dipole]
else:
assert self.moments[1] == dipole
def _parse_mosyms_moenergies(self, inputfile, spinidx):
"""Parse molecular orbital symmetries and energies from the
'Post-Iterations' section.
"""
line = next(inputfile)
while line.strip():
for i in range(len(line.split()) // 2):
self.mosyms[spinidx].append(line.split()[i*2][-2:])
moenergy = utils.convertor(float(line.split()[i*2+1]), "hartree", "eV")
self.moenergies[spinidx].append(moenergy)
line = next(inputfile)
return
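# A minimal sketch (not part of the parser above) of how one line of the
# orbital-energy table, e.g. "15Ag  -0.415620  1Bg  -0.376962", is split into
# (symmetry, energy-in-eV) pairs by the loop in _parse_mosyms_moenergies.
# The hartree-to-eV factor below is an assumption standing in for
# utils.convertor(..., "hartree", "eV").
def _example_split_orbital_line(line="15Ag   -0.415620     1Bg   -0.376962"):
    HARTREE_TO_EV = 27.21138505  # assumed conversion factor
    tokens = line.split()
    pairs = []
    for i in range(len(tokens) // 2):
        sym = tokens[i * 2][-2:]  # keep only the irrep label, e.g. 'Ag'
        energy = float(tokens[i * 2 + 1]) * HARTREE_TO_EV
        pairs.append((sym, energy))
    return pairs  # [('Ag', -11.31...), ('Bg', -10.25...)]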
|
|
# -*- coding: utf-8 -*-
'''
The function in `vdirsyncer.sync` can be called on two instances of `Storage`
to synchronize them. Because storage classes implement a common abstract API,
the two given instances don't have to be of exactly the same type. This allows
us not only to synchronize a local vdir with a CalDAV server, but also to
synchronize two CalDAV servers or two local vdirs.
The algorithm is based on the blogpost "How OfflineIMAP works" by Edward Z.
Yang: http://blog.ezyang.com/2012/08/how-offlineimap-works/
Some modifications to it are explained in
https://unterwaditzer.net/2016/sync-algorithm.html
'''
import contextlib
import itertools
import logging
from . import exceptions
from .utils import uniq
sync_logger = logging.getLogger(__name__)
class SyncError(exceptions.Error):
'''Errors related to synchronization.'''
class SyncConflict(SyncError):
'''
    Two items changed since the last sync; they now have different contents, and
    no conflict resolution method was given.
:param ident: The ident of the item.
:param href_a: The item's href on side A.
:param href_b: The item's href on side B.
'''
ident = None
href_a = None
href_b = None
class IdentConflict(SyncError):
'''
Multiple items on the same storage have the same UID.
:param storage: The affected storage.
:param hrefs: List of affected hrefs on `storage`.
'''
storage = None
_hrefs = None
@property
def hrefs(self):
return self._hrefs
@hrefs.setter
def hrefs(self, val):
new_val = set(val)
assert len(new_val) > 1, val
self._hrefs = new_val
class StorageEmpty(SyncError):
'''
    One storage unexpectedly became completely empty between two synchronizations.
The first argument is the empty storage.
:param empty_storage: The empty
:py:class:`vdirsyncer.storage.base.Storage`.
'''
empty_storage = None
class BothReadOnly(SyncError):
'''
Both storages are marked as read-only. Synchronization is therefore not
possible.
'''
class PartialSync(SyncError):
'''
Attempted change on read-only storage.
'''
storage = None
class _ItemMetadata:
href = None
_item = None
hash = None
etag = None
def __init__(self, **kwargs):
for k, v in kwargs.items():
assert hasattr(self, k)
setattr(self, k, v)
@property
def item(self):
return self._item
@item.setter
def item(self, item):
self._item = item
self.hash = item.hash
def to_status(self):
return {
'href': self.href,
'etag': self.etag,
'hash': self.hash
}
class _StorageInfo(object):
'''A wrapper class that holds prefetched items, the status and other
things.'''
def __init__(self, storage, status):
'''
:param status: {ident: {'href': href, 'etag': etag}}
'''
self.storage = storage
#: Represents the status as given. Must not be modified.
self.status = status
#: Represents the current state of the storage and is modified as items
#: are uploaded and downloaded. Will be dumped into status.
self.new_status = None
def prepare_new_status(self):
href_to_status = dict((meta.href, (ident, meta))
for ident, meta
in self.status.items())
prefetch = []
self.new_status = {}
def _store_props(ident, props):
new_props = self.new_status.setdefault(ident, props)
if new_props is not props:
raise IdentConflict(storage=self.storage,
hrefs=[new_props.href,
props.href])
for href, etag in self.storage.list():
ident, meta = href_to_status.get(href, (None, None))
if meta is None:
meta = _ItemMetadata()
if meta.href != href or meta.etag != etag:
# Either the item is completely new, or updated
# In both cases we should prefetch
prefetch.append(href)
else:
meta.href = href
meta.etag = etag
_store_props(ident, meta)
# Prefetch items
for href, item, etag in (self.storage.get_multi(prefetch)
if prefetch else ()):
_store_props(item.ident, _ItemMetadata(
href=href,
etag=etag,
item=item
))
    def is_changed(self, ident):
        status = self.status.get(ident, None)
        meta = self.new_status[ident]
        if status is None:  # new item
            return True
        if meta.etag != status.etag:  # etag changed
            old_hash = status.hash
            if old_hash is None or meta.hash != old_hash:
                # item actually changed
                return True
            else:
                # only etag changed
                return False
        # neither etag nor content hash changed
        return False
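    # Example of the two-step check in is_changed above (etags invented): if
    # the server rewrote an item so its etag went from '"1"' to '"2"' but the
    # stored content hash still matches, the ident is reported as unchanged
    # and no transfer or conflict resolution is scheduled for it.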
def _migrate_status(status):
for ident in list(status):
value = status[ident]
if len(value) == 4:
href_a, etag_a, href_b, etag_b = value
status[ident] = ({
'href': href_a,
'etag': etag_a,
}, {
'href': href_b,
'etag': etag_b,
})
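# A minimal sketch (not part of vdirsyncer) illustrating the migration above;
# the ident and hrefs are invented for illustration and nothing here runs on
# import.
def _example_migrate_status():
    status = {'uid1': ('a.ics', '"etag-a"', 'b.ics', '"etag-b"')}
    _migrate_status(status)
    # The legacy 4-tuple is rewritten in place into the per-side dict form
    # consumed by sync() below.
    assert status['uid1'] == ({'href': 'a.ics', 'etag': '"etag-a"'},
                              {'href': 'b.ics', 'etag': '"etag-b"'})
    return status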
def sync(storage_a, storage_b, status, conflict_resolution=None,
force_delete=False, error_callback=None, partial_sync='revert'):
'''Synchronizes two storages.
:param storage_a: The first storage
:type storage_a: :class:`vdirsyncer.storage.base.Storage`
:param storage_b: The second storage
:type storage_b: :class:`vdirsyncer.storage.base.Storage`
:param status: {ident: (href_a, etag_a, href_b, etag_b)}
metadata about the two storages for detection of changes. Will be
modified by the function and should be passed to it at the next sync.
If this is the first sync, an empty dictionary should be provided.
:param conflict_resolution: A function that, given two conflicting item
versions A and B, returns a new item with conflicts resolved. The UID
must be the same. The strings `"a wins"` and `"b wins"` are also
accepted to mean that that side's version will always be taken. If none
is provided, the sync function will raise :py:exc:`SyncConflict`.
:param force_delete: When one storage got completely emptied between two
syncs, :py:exc:`StorageEmpty` is raised for
safety. Setting this parameter to ``True`` disables this safety
measure.
:param error_callback: Instead of raising errors when executing actions,
call the given function with an `Exception` as the only argument.
:param partial_sync: What to do when doing sync actions on read-only
storages.
- ``error``: Raise an error.
- ``ignore``: Those actions are simply skipped.
- ``revert`` (default): Revert changes on other side.
'''
if storage_a.read_only and storage_b.read_only:
raise BothReadOnly()
if conflict_resolution == 'a wins':
conflict_resolution = lambda a, b: a
elif conflict_resolution == 'b wins':
conflict_resolution = lambda a, b: b
_migrate_status(status)
a_status = {}
b_status = {}
for ident, (meta_a, meta_b) in status.items():
a_status[ident] = _ItemMetadata(**meta_a)
b_status[ident] = _ItemMetadata(**meta_b)
a_info = _StorageInfo(storage_a, a_status)
b_info = _StorageInfo(storage_b, b_status)
a_info.prepare_new_status()
b_info.prepare_new_status()
if status and not force_delete:
if a_info.new_status and not b_info.new_status:
raise StorageEmpty(empty_storage=storage_b)
elif b_info.new_status and not a_info.new_status:
raise StorageEmpty(empty_storage=storage_a)
actions = list(_get_actions(a_info, b_info))
with storage_a.at_once(), storage_b.at_once():
for action in actions:
try:
action.run(a_info, b_info, conflict_resolution, partial_sync)
except Exception as e:
if error_callback:
error_callback(e)
else:
raise
status.clear()
for ident in uniq(itertools.chain(a_info.new_status,
b_info.new_status)):
status[ident] = (
a_info.new_status[ident].to_status(),
b_info.new_status[ident].to_status()
)
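# A hedged usage sketch (nothing here runs on import): how a caller might
# drive sync() while persisting the status dict between runs. storage_a,
# storage_b and the status file name are assumptions standing in for any two
# Storage instances and any persistence mechanism.
def _example_sync_roundtrip(storage_a, storage_b, status_path='status.json'):
    import json
    import os
    status = {}
    if os.path.exists(status_path):
        with open(status_path) as f:
            status = json.load(f)
    # 'a wins' resolves every conflict by taking side A's version.
    sync(storage_a, storage_b, status, conflict_resolution='a wins')
    # sync() mutates `status`; persist it for the next invocation.
    with open(status_path, 'w') as f:
        json.dump(status, f)
    return status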
class Action:
def _run_impl(self, a, b): # pragma: no cover
raise NotImplementedError()
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
if self.dest.storage.read_only:
if partial_sync == 'error':
raise PartialSync(self.dest.storage)
elif partial_sync == 'ignore':
self.rollback(a, b)
return
else:
assert partial_sync == 'revert'
self._run_impl(a, b)
@contextlib.contextmanager
def auto_rollback(self, a, b):
try:
yield
except BaseException as e:
self.rollback(a, b)
raise e
def rollback(self, a, b):
for info in (a, b):
if self.ident in info.status:
info.new_status[self.ident] = info.status[self.ident]
else:
info.new_status.pop(self.ident, None)
class Upload(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
href = etag = None
else:
sync_logger.info(u'Copying (uploading) item {} to {}'
.format(self.ident, self.dest.storage))
href, etag = self.dest.storage.upload(self.item)
assert self.ident not in self.dest.new_status
self.dest.new_status[self.ident] = _ItemMetadata(
href=href,
hash=self.item.hash,
etag=etag
)
class Update(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
meta = _ItemMetadata(item=self.item)
else:
sync_logger.info(u'Copying (updating) item {} to {}'
.format(self.ident, self.dest.storage))
meta = self.dest.new_status[self.ident]
meta.etag = \
self.dest.storage.update(meta.href, self.item, meta.etag)
self.dest.new_status[self.ident] = meta
class Delete(Action):
def __init__(self, ident, dest):
self.ident = ident
self.dest = dest
def _run_impl(self, a, b):
meta = self.dest.new_status[self.ident]
if not self.dest.storage.read_only:
sync_logger.info(u'Deleting item {} from {}'
.format(self.ident, self.dest.storage))
self.dest.storage.delete(meta.href, meta.etag)
del self.dest.new_status[self.ident]
class ResolveConflict(Action):
def __init__(self, ident):
self.ident = ident
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
sync_logger.info(u'Doing conflict resolution for item {}...'
.format(self.ident))
meta_a = a.new_status[self.ident]
meta_b = b.new_status[self.ident]
if meta_a.hash == meta_b.hash:
sync_logger.info(u'...same content on both sides.')
elif conflict_resolution is None:
raise SyncConflict(ident=self.ident, href_a=meta_a.href,
href_b=meta_b.href)
elif callable(conflict_resolution):
new_item = conflict_resolution(meta_a.item, meta_b.item)
if new_item.hash != meta_a.hash:
Update(new_item, a).run(a, b, conflict_resolution,
partial_sync)
if new_item.hash != meta_b.hash:
Update(new_item, b).run(a, b, conflict_resolution,
partial_sync)
else:
raise exceptions.UserError(
'Invalid conflict resolution mode: {!r}'
.format(conflict_resolution))
def _get_actions(a_info, b_info):
for ident in uniq(itertools.chain(a_info.new_status, b_info.new_status,
a_info.status)):
a = a_info.new_status.get(ident, None) # item exists in a
b = b_info.new_status.get(ident, None) # item exists in b
if a and b:
a_changed = a_info.is_changed(ident)
b_changed = b_info.is_changed(ident)
if a_changed and b_changed:
# item was modified on both sides
# OR: missing status
yield ResolveConflict(ident)
elif a_changed and not b_changed:
# item was only modified in a
yield Update(a.item, b_info)
elif not a_changed and b_changed:
# item was only modified in b
yield Update(b.item, a_info)
elif a and not b:
if a_info.is_changed(ident):
# was deleted from b but modified on a
# OR: new item was created in a
yield Upload(a.item, b_info)
else:
# was deleted from b and not modified on a
yield Delete(ident, a_info)
elif not a and b:
if b_info.is_changed(ident):
# was deleted from a but modified on b
# OR: new item was created in b
yield Upload(b.item, a_info)
else:
# was deleted from a and not changed on b
yield Delete(ident, b_info)
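# Summary of the decision table implemented by _get_actions above, per ident
# ("changed" means is_changed() returned true for that side):
#
#   in A  in B  changed        resulting action
#   yes   yes   both           ResolveConflict
#   yes   yes   only A         Update A's item onto B
#   yes   yes   only B         Update B's item onto A
#   yes   yes   neither        nothing to do
#   yes   no    A changed      Upload A's item to B (edited on A, deleted on B)
#   yes   no    A unchanged    Delete from A (propagate B's deletion)
#   no    yes   B changed      Upload B's item to A
#   no    yes   B unchanged    Delete from B
#   no    no    -              nothing to do; sync() drops the stale status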
|
|
# -*- coding: utf-8 -*-
"""
Misc ArcPy Addons
Author: Garin Wally
License: MIT
"""
import os
import glob
import random
import re
import sys
#from time import sleep
from queue import Queue
from subprocess import Popen, PIPE
from collections import OrderedDict
from xml.dom import minidom as DOM
from ConfigParser import RawConfigParser
import pandas as pd
import numpy as np
import ogr
import arcpy
#from archacks import DIR
DIR = os.path.abspath(os.path.dirname(__file__))
NEW_GROUP_LAYER = os.path.join(DIR, "NewGroupLayer.lyr")
type_map = {
"int": ["Double", "Integer", "ShortInteger"],
"long": ["Float"],
"str": ["Text", "String"]}
# TODO: not the best...
def is_active(exe="arcmap"):
regex = "(?i){}.exe".format(exe)
if re.findall(regex, sys.executable.replace("\\", "/")):
return True
return False
# MapDocument() cannot be called from within classes and must be global
if is_active():
MXD = arcpy.mapping.MapDocument("CURRENT")
else:
MXD = None
class GDB(object):
def __init__(self, gdb_path, srid=0, datasets=[], default_queue_ds=""):
"""Geodatabase Object.
Args:
gdb_path (str): path to new/existing Geodatabase
srid (int): Spatial Reference ID to use for datasets only
datasets (list): dataset names to create using SRID
default_queue_ds (str): dataset name to use as default for queue
"""
self.path = gdb_path
if not self.path.endswith(".gdb"):
raise AttributeError("Not a Geodatabase")
self.parent_folder = os.path.dirname(self.path)
self.name = os.path.basename(self.path)
self.srid = srid
self.sr = arcpy.SpatialReference(self.srid)
self.datasets = datasets
self.default_queue_ds = default_queue_ds
self.data_queue = Queue()
def build(self):
"""Builds gdb, creates datasets, adds queued data."""
if not os.path.exists(self.path):
arcpy.CreateFileGDB_management(self.parent_folder, self.name)
arcpy.RefreshCatalog(self.path)
arcpy.env.workspace = self.path
for ds in self.datasets:
arcpy.CreateFeatureDataset_management(self.path, ds, self.srid)
arcpy.RefreshCatalog(os.path.join(self.path, ds))
if self.data_queue:
self.load_queued_data()
return
def add(self, in_data_path, data_name="", dataset=""):
"""Adds input featureclass to geodatabase.
Args:
in_data_path (str): path to input data
data_name (str): optionally rename entered data
dataset (str): dataset to send imported data
"""
if not data_name:
data_name = os.path.basename(in_data_path)
if "sde" in data_name.lower():
data_name = data_name.split(".")[-1]
elif "." in data_name:
data_name = data_name.split(".")[0]
out = os.path.join(self.path, dataset).strip("\\").strip("/")
arcpy.FeatureClassToFeatureClass_conversion(
in_data_path, out, data_name)
# Easily access data paths by fc name
setattr(self, data_name.lower(),
os.path.join(self.path, dataset, data_name))
return
def add_many(self, data_mapping={}, data_list=[], dataset=""):
"""Adds a list or dict of input feature classes.
Args:
data_mapping (dict): dictionary of {data_name: data_path}
data_list (list): list of data paths to import
dataset (str): destination dataset for imported data
"""
if data_mapping:
for k, v in data_mapping.items():
                self.add(v, k, dataset=dataset)
if data_list:
for fc_path in data_list:
self.add(fc_path, dataset=dataset)
return
def load_queued_data(self):
"""Alias of 'add_many' for importing all data in the data_queue."""
# Remove path from queue
while self.data_queue.qsize() > 0:
self.add(self.data_queue.get(), "", dataset=self.default_queue_ds)
return
# Debilitatingly slow
'''
def add_table(self, table_path, table_name="", where=""):
if not table_name:
table_name = os.path.basename(table_path)
if "sde" in table_name.lower():
table_name = table_name.split(".")[-1]
elif "." in table_name:
table_name = table_name.split(".")[0]
arcpy.TableToGeodatabase_conversion(table_path, self.path)#, table_name)
return
'''
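# A hedged usage sketch of the GDB class above (paths, SRID and dataset names
# are invented for illustration; nothing here runs on import).
def _example_build_gdb():
    gdb = GDB(r"C:\temp\example.gdb", srid=2256, datasets=["Base"],
              default_queue_ds="Base")
    # Queue feature classes to be imported after the geodatabase is built.
    gdb.data_queue.put(r"C:\data\parcels.shp")
    gdb.data_queue.put(r"C:\data\streets.shp")
    # Creates the .gdb and the 'Base' dataset, then loads everything queued.
    gdb.build()
    # Imported paths are also exposed as attributes, e.g. gdb.parcels
    return gdb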
def df2tbl(df, out_path):
# Convert dataframe to array
a = np.array(np.rec.fromrecords(df.values))
# Add field names to array
a.dtype.names = tuple(df.columns.tolist())
# Sort of surprised ESRI thought of this
arcpy.da.NumPyArrayToTable(a, out_path)
# ...and of course we have to call this...
arcpy.RefreshCatalog(out_path)
return
def domains2df(workspace):
"""Converts all domains into a dict of dataframes."""
domain_obj = arcpy.da.ListDomains(workspace)
domdict = {
d.name: pd.DataFrame.from_dict(d.codedValues, orient="index").sort()
for d in domain_obj
}
for key in domdict:
domdict[key].reset_index(inplace=True)
domdict[key].columns = ["Key", "Value"]
return domdict
def domain2tbl(workspace, domain, output):
domdict = domains2df(workspace)
df2tbl(domdict[domain], output)
return
def mxds2pdfs(in_folder, out_folder, verbose=False):
"""Exports all .mxd files in a folder to .pdf files in a folder."""
for mxd_file in glob.glob("{}/*.mxd".format(in_folder)):
mxd_file = os.path.join(in_folder, mxd_file)
mxd = arcpy.mapping.MapDocument(mxd_file)
pdf_name = os.path.basename(mxd_file).replace(".mxd", ".pdf")
out_pdf = os.path.join(
out_folder,
pdf_name)
if verbose:
print(pdf_name)
arcpy.mapping.ExportToPDF(mxd, out_pdf)
return
class DataFramesWrapper(object):
"""Container for dataframes that is index-able by name and index."""
def __init__(self, mxd):
self.mxd = mxd
@property
def _dict(self):
return OrderedDict([(df.name, df) for df
in arcpy.mapping.ListDataFrames(self.mxd)])
@property
def _list(self):
return self._dict.values()
def __getitem__(self, index):
if type(index) is int:
return self._list[index]
return self._dict[index]
def __iter__(self):
"""All dataframe objects."""
return self._dict.itervalues()
def __str__(self):
return str(self._dict)
def __repr__(self):
return str(self._dict)
class Map(object):
def __init__(self):
try:
self.mxd = MXD
        except Exception:  # MXD lookup failed; fall back to None
self.mxd = None
@property
def dataframes(self):
return DataFramesWrapper(MXD)
@property
def count_dataframes(self):
return len(self.dataframes._list)
@property
def df_layers(self):
return OrderedDict([(df.name, arcpy.mapping.ListLayers(df)) for df
in self.dataframes])
@property
def layers(self):
all_lyrs = []
for lyr_list in self.df_layers.values():
all_lyrs.extend(lyr_list)
return {lyr.name: lyr for lyr in all_lyrs}
@property
def layer_names(self):
return self.layers.keys()
def as_object(self, layer_name):
"""Returns the input layer name as an object.
Args:
layer_name (str): name of layer
Use:
city = m.as_object("City Limits")
"""
return self.layers[layer_name]
def rename_layer(self, old_name, new_name, dataframe=0):
self.layers[old_name].name = new_name
self.refresh()
return
def refresh(self):
arcpy.RefreshTOC()
arcpy.RefreshActiveView()
return
def add_group_lyr(self, name, dataframe=0):
group_lyr = arcpy.mapping.Layer(NEW_GROUP_LAYER)
arcpy.mapping.AddLayer(self.dataframes[dataframe], group_lyr, "TOP")
self.rename_layer("New Group Layer", name)
self.refresh()
return
def toggle_on(self, layer_name="*"):
"""Toggles the input or all ("*") layer's visibility to on."""
if layer_name != "*":
self.layers[layer_name].visible = True
else:
for lyr in self.layers.values():
lyr.visible = True
self.refresh()
return
def toggle_off(self, layer_name="*"):
"""Toggles the input or all ("*") layer's visibility to off."""
if layer_name != "*":
self.layers[layer_name].visible = False
else:
for lyr in self.layers.values():
lyr.visible = False
self.refresh()
return
def spatial_field_calc(target_features, output, target_field, join_features,
join_field, merge_rule, match_option="INTERSECT",
default_value=None):
"""Adds a new field to target features via a spatial join.
    Args:
        target_features (str): feature class that receives the new field
        output (str): path of the output feature class created by the join
        target_field (str): name of the new (joined) field on the output
        join_features (str): feature class whose field is summarized
        join_field (str): field on join_features to summarize into target_field
        merge_rule (str): merge rule for the join, e.g. "sum" or "first"
        match_option (str): spatial match option (default "INTERSECT")
        default_value: value written where the joined field is NULL
Example:
>>> spatial_field_calc("parcels", "in_memory/sfieldcalc",
"dwellings17", "permits17", "dwellings", "sum")
"""
# Set field mappings from target_features
fieldmappings = arcpy.FieldMappings()
fieldmappings.addTable(target_features)
# Set field mappings from join_features
join_map = arcpy.FieldMappings()
join_map.addTable(join_features)
# Edit the output fieldmap
field_map = join_map.getFieldMap(join_map.findFieldMapIndex(join_field))
jfield = field_map.outputField
# Name output field
jfield.name = target_field
jfield.aliasName = target_field
# Overwrite old field data with new
field_map.outputField = jfield
field_map.mergeRule = merge_rule
# Add the edited join_field fieldmap from join_features to target_features
fieldmappings.addFieldMap(field_map)
# Execute the spatial join
result = arcpy.SpatialJoin_analysis(target_features, join_features, output,
"#", "#", fieldmappings,
match_option=match_option)
# Convert NULL values to default_value
with arcpy.da.UpdateCursor(output, [target_field]) as cur:
for row in cur:
if row[0] is None:
row[0] = default_value
cur.updateRow(row)
return result
class TableOfContents(object):
"""Table of Contents Object."""
def __init__(self, mxd="CURRENT"):
self.mxd_name = mxd
self.mxd = None
if self.mxd_name:
self.set_mxd(self.mxd_name)
def set_mxd(self, mxd):
self.mxd_name = mxd
self.mxd = arcpy.mapping.MapDocument(self.mxd_name)
def as_featurelyr(self, layer_name):
"""Gets a layer as a feature layer (e.g. make selections on it)."""
flyr_name = layer_name + "_fclyr"
arcpy.MakeFeatureLayer_management(self[layer_name], flyr_name)
return flyr_name
@property
def dataframes(self):
return arcpy.mapping.ListDataFrames(self.mxd)
@property
def contents(self):
cont = {lyr.name: lyr for lyr in arcpy.mapping.ListLayers(self.mxd)}
cont.update({tbl.name: tbl for tbl in
arcpy.mapping.ListTableViews(self.mxd)})
return cont
@property
def features_selected(self): # TODO: assert actually selected not total
sel = {}
for lyr in self.contents.values():
d = {lyr.name: int(arcpy.GetCount_management(lyr).getOutput(0))}
sel.update(d)
return sel
def add_fc(self, fc_path, df_idx=0, loc="TOP"):
"""Wraps the rediculous process of adding data to an mxd"""
new_lyr = arcpy.mapping.Layer(fc_path)
arcpy.mapping.AddLayer(self.dataframes[df_idx], new_lyr, loc)
return
    def remove(self, layer_name):
        """Removes layer from TOC by name."""
        for df in self.dataframes:
            try:
                arcpy.mapping.RemoveLayer(df, self.contents[layer_name])
            except Exception:
                pass
        return
def __getitem__(self, key):
"""Support dict-style item getting."""
return self.contents[key]
if is_active():
TOC = TableOfContents()
else:
TOC = TableOfContents(None)
# =============================================================================
# LOCKS
def get_locks(gdb):
    """Yields the current lock files in a gdb."""
    # TODO: change to `glob(os.path.join(gdb, "*.lock"))`
    locks = [f for f in os.listdir(gdb) if ".lock" in f]
    for lock in locks:
        try:
            # Try to open the lock file itself for writing; if another
            # process holds it, this raises IOError and the lock is reported.
            with open(os.path.join(gdb, lock), "w"):
                pass
        except IOError:
            yield lock
def get_lock_users(gdb):
"""Lists the users holding locks on a gdb."""
locks = [f.split(".")[1] for f in get_locks(gdb)]
return list(set(locks))
# =============================================================================
# STRING FORMATTERS
def in_dataset(path):
if not os.path.split(path)[0].endswith(".gdb"):
return True
return False
def rm_ds(dataset_path):
"""Removes the dataset name from a GDB path."""
if in_dataset(dataset_path):
parts = os.path.split(dataset_path)
return os.path.join(os.path.split(parts[0])[0], parts[1])
return dataset_path
def unc_path(drive_path, unc_path):
"""Replaces a mapped network drive with a UNC path.
Example:
>>> unc_path('I:/workspace', r'\\cityfiles\stuff')
'\\\\cityfiles\\stuff\\workspace'
"""
drive_path = drive_path.replace("/", "\\")
drive = os.path.splitdrive(drive_path)[0]
p = Popen("net use", stdout=PIPE, creationflags=0x08000000)
raw_result = p.communicate()[0]
result = re.findall("{}(.*)\r".format(drive), raw_result)[0]
unc = result.strip().split(" ")[0]
return drive_path.replace(drive, unc)
# =============================================================================
# TABLE UTILITIES
def fill_na(fc, fields, repl_value=0):
"""Update '<Null>' values (None) in input fields.
Args:
fc (str): name or path of input feature class
fields (list): list of fields to replace NULL with 'repl_value'
repl_value (many): value to replace NULL
"""
desc_fields = arcpy.Describe(fc).fields
field_objs = [f for f in desc_fields if f.name in fields]
if len(field_objs) != len(fields):
raise AttributeError("Check spelling of field names")
# Make sure fields are editable
are_editable = [f.editable for f in field_objs]
if not all(are_editable):
ne_fields = [f.name for f in field_objs if not f.editable]
raise AttributeError("Field(s) not editable: {}".format(ne_fields))
# Make sure repl_value matches type of all input fields
m = [f.type in type_map[type(repl_value).__name__] for f in field_objs]
if not all(m):
raise TypeError("Replace value and column types do not match")
    # Change the NULL values (None) to repl_value
    with arcpy.da.UpdateCursor(fc, fields) as cur:
        for row in cur:
            row = [repl_value if v is None else v for v in row]
            cur.updateRow(row)
return
def tbl2df(tbl, fields=["*"]):
"""Loads a table or featureclass into a pandas dataframe.
Args:
tbl (str): table or featureclass path or name (in Arc Python Window)
fields (list): names of fields to load; value of '*' loads all fields
"""
# List holds each row as a transposed dataframe
frames = []
if fields == ["*"] or fields == "*":
fields = [f.name for f in arcpy.Describe(tbl).fields]
with arcpy.da.SearchCursor(tbl, fields) as cur:
for row in cur:
row_df = pd.DataFrame(list(row)).T
row_df.columns = cur.fields
frames.append(row_df)
# Make a single dataframe from the list
df = pd.concat(frames)
df.reset_index(inplace=True, drop=True)
return df
def ogdb2df(fc_path, fields=["*"]):
"""Open ESRI GDB data as a pandas dataframe (uses osgeo/OpenFileGDB).
This option can be much faster than tbl2df.
Args:
gdb_path (str): path to gdb or path to feature in gdb
fields (list): names of fields to load; value of '*' loads all fields
"""
fc_path = rm_ds(fc_path)
driver = ogr.GetDriverByName("OpenFileGDB")
gdb_path, fc_name = os.path.split(fc_path)
gdb = driver.Open(gdb_path)
fc = gdb.GetLayerByName(fc_name)
schema = fc.schema
if fields == ["*"] or fields == "*":
fields = [f.name for f in schema]
frames = []
feat = fc.GetNextFeature()
while feat:
row = [feat.GetField(f) for f in fields]
row_df = pd.DataFrame(row).T
row_df.columns = fields
frames.append(row_df)
feat = fc.GetNextFeature()
df = pd.concat(frames)
df.index = range(len(df))
return df
def tbl2excel(tbl, out_path, fields=["*"]):
"""Exports an input table or feature class to Excel."""
df = tbl2df(tbl, fields)
df.to_excel(out_path)
return
def groupby(fc, gb_field, summary_field):
fields = [gb_field, summary_field]
df = tbl2df(fc, fields)
return df.groupby(gb_field).sum()
def drop_all(fc, keep=[]):
"""Drops all nonrequired columns except those specified."""
warnings = []
fields = [f.name for f in arcpy.ListFields(fc)]
# TODO: what about difference between keep and all_fields?
rm_fields = list(set(fields).symmetric_difference(set(keep)))
for field in rm_fields:
try:
arcpy.DeleteField_management(fc, field)
except Exception: # TODO:
warnings.append(field)
print("Field(s) could not be removed: {}".format(warnings))
return
def field_value_set(fc, field):
s = set()
with arcpy.da.SearchCursor(fc, field) as cur:
for row in cur:
s.add(row[0])
return s
def is_unique(fc, fields):
"""Checks if fields of a feature class have all unique values."""
if isinstance(fields, str):
fields = [fields]
s = set()
row_cnt = 0
with arcpy.da.SearchCursor(fc, fields) as cur:
for row in cur:
row_cnt += 1
            s.add(tuple(row))  # track the full field combination, not just the first field
if len(s) == row_cnt:
return True
return False
def max_in_list(find_str, in_list, digits=2):
"""Find the field containing a substring and the largest number.
Good for finding the max year of a series of fields.
Args:
        find_str (str): substring of field name; use '' to just take the overall max
in_list (list): a list of field names to search
Returns the field name containing the max number
Use:
>>> fields = ["Year", "Pop10", "Pop20", "Pop30", "Average60"]
>>> max_in_list("Pop", fields)
"Pop30"
>>> max_in_list("", fields)
"Average60"
"""
    # Filter out fields that do not contain an n-digit number
    filt_re = r"\d{{{}}}".format(digits)
    filtered_list = [f for f in in_list if re.findall(filt_re, f)]
    print(filtered_list)
    if not filtered_list:
        raise AttributeError(
            "No list value contains a {}-digit number".format(digits))
    m = max([int(re.findall(filt_re, i)[0]) for i in filtered_list
             if find_str in i])
    return [i for i in in_list if str(m) in i][0]
def sum_field(fc, field):
"""Returns the sum of a field."""
with arcpy.da.SearchCursor(fc, field) as cur:
total = 0
for row in cur:
total += row[0]
return total
def list_all_fields(fc):
"""Returns a list of all fields, includes joined fields."""
fields = [f.name for f in arcpy.Describe(fc).fields]
return fields
def list_joins(fc):
"""Returns a set of tables currently joined to a feature class."""
fields = list_all_fields(fc)
s = set()
[s.add(j.split("$")[0]) for j in fields if "$" in j]
return s
def oid_by_regex(fc, regex, field, oid_field="OBJECTID"):
"""Yields record oids where field value matches regex."""
with arcpy.da.SearchCursor(fc, [oid_field, field]) as cur:
for row in cur:
if row[1] and re.findall(regex, row[1]):
yield row[0]
def layer_by_regex(regex):
"""Returns the full name of a layer based on a substring or regex."""
for layer in TOC.contents.keys():
if re.findall("(?i){}".format(regex), layer):
return layer
def regex_selection(fc, regex, field, id_field="OBJECTID"):
"""For when LIKE statements just don't cut the '(?i)mustard'."""
ids = list(oid_by_regex(fc, regex, field, id_field))
if not ids:
raise IOError("Nothing found")
in_qry = "{} IN ({})".format(id_field, ', '.join([str(i) for i in ids]))
arcpy.SelectLayerByAttribute_management(fc, where_clause=in_qry)
return
def field_by_regex(fc, field_regex, escape_tables=True):
"""Returns a list of field names matching a regular expression."""
for f in arcpy.Describe(fc).fields:
if escape_tables:
field_regex = field_regex.replace("$.", "\\$\\.")
if re.findall(field_regex, f.name):
yield f.name
def select_random(layer, field, sample_size, filter_lambda=None):
"""Selects a random number of features from a layer.
Args:
layer (str): name of a layer in the TOC
field (str): name of a field/attribute in the layer
sample_size (int): number of random features to select
filter_lambda (function): optionally filter the set using a function
Example:
# Select 10 random parcels that do not have a "7" at position -4
# in the 'ParcelID' field
>>> select_random("Parcels", "ParcelID", 10, lambda x: x[-4] <> "7")
"""
# TODO: test
# TODO: allow ADD_TO_SELECTION option
# TODO: allow lambda access to any field value
# Get the layer as a dataframe of unique values
df = tbl2df(layer, field).drop_duplicates()
# Create empty set
s = set()
while len(s) < sample_size:
# Add a distinct random value to the set
s.add(random.choice(df[field].tolist()))
# Optionally reduce the set using an input function
if filter_lambda:
s = set(filter(filter_lambda, s))
# Select the features in the set
arcpy.SelectLayerByAttribute_management(
layer,
"NEW_SELECTION",
# Create a WHERE IN statement
# e.g. `"ParcelID" IN ('040000', '040001')`
"\"{field}\" IN ({values})".format(
field="ParcelID",
values=", ".join(["'" + v + "'" for v in s])))
return
# =============================================================================
# QUERIES
def like_list(field, values, case="", condition="OR"):
"""Make a `<field> LIKE '%value%'` string for list of values.
Args:
field (str): field to use in LIKE statement; may need to be quoted
values (iterable): values to convert to LIKE query
condition (str): 'AND' or 'OR' (default 'OR')
case (str): optionally convert values to title, upper, or lower
Returns joined string.
Usage:
>>> like_list('"Subdivision"', ["Ranch", "Apple"], case="upper")
'Subdivision" LIKE \'%RANCH%\' OR "Subdivision" LIKE \'%APPLE%\'"'
"""
cond = " {} ".format(condition)
if case.lower() == 'title':
values = [v.title() for v in values]
elif case.lower() == 'upper':
values = [v.upper() for v in values]
elif case.lower() == 'lower':
values = [v.lower() for v in values]
q = cond.join(["{} LIKE '%{}%'".format(field, v) for v in values])
return q
# =============================================================================
# FIELD MAPS
# Note: a field map is a string describing a field and its merge rules
# Note: a field mapping is a list of field maps joined by ';'
# TODO: remove?
'''
def get_fieldmap(fc):
"""Get current fieldmapping as list."""
mappings = arcpy.FieldMappings()
mappings.addTable(fc)
def make_fieldmap(fc, field, rename=None, merge_rule="First"):
"""Easy manipulation of FieldMap/Mappings. Not a valid FieldMap object."""
m = arcpy.FieldMap()
m.mergeRule = merge_rule
maps = arcpy.FieldMappings()
full_f_name = list(regex_fields(fc, field))[0]
m.addInputField(fc, full_f_name)
maps.addFieldMap(m)
s = maps.exportToString()
if rename:
regex_name = re.sub("\$\.", "\\$\\.", full_f_name)
regex = "{}(?!,)".format(regex_name)
s = re.sub(regex, rename, s)
return s
def make_fieldmaps(fc, fields):
if isinstance(fields, dict):
for field, rename in fields.items():
yield make_fieldmap(fc, field, rename)
else:
for field in fields:
yield make_fieldmap(fc, field)
def join_fieldmaps(maps):
return ";".join(maps)
def get_field_type(fc, field):
"""Returns a set of value types found within a field."""
s = set()
with arcpy.da.SearchCursor(fc, field) as cur:
for row in cur:
s.add(type(row[0]).__name__)
return s
'''
# TODO: 'spatial join' that copies a field from the selected to the
# intersecting features
# =============================================================================
# WRAPPERS
# Wrappers are particularly engineered for use in ArcMaps' Python Window
def fc2fc(in_fc, full_out_path, where=None, limit_fields=None):
"""Wraps 'arcpy.FeatureClassToFeatureClass_conversion with a short name."""
full_out_path = full_out_path.replace("\\", "/")
out_path, out_name = os.path.split(full_out_path)
mapping = None
# TODO:
#if limit_fields:
# mapping = limit_fields(in_fc, limit_fields)
return arcpy.FeatureClassToFeatureClass_conversion(
in_fc, out_path, out_name, where, mapping)
class GDBPkg(object):
def __init__(self, out_location, gdb_name):
"""Create a template for a file geodatabase and make all at once."""
self.out_location = out_location
self.name = gdb_name
if not gdb_name.endswith(".gdb"):
self.name = gdb_name + ".gdb"
self.contents = []
self.datasets = []
# Validate
if not os.path.exists(self.out_location):
raise IOError("Out location does not exist")
if self.exists:
raise IOError("GDB already exists")
@property
def path(self):
"""Output path for staged GDB."""
return os.path.join(self.out_location, self.name)
@property
def exists(self):
if os.path.exists(self.path):
return True
return False
def add_feature(self, out_name, feature_path, dataset=""):
"""Stages a feature class for import."""
self.contents.append([out_name, feature_path, dataset])
return
def add_dataset(self, name, refsys=0):
"""Stages a feature dataset for creation."""
self.datasets.append([name, refsys])
return
def make(self):
"""Create the staged GDB."""
# Create GDB
arcpy.CreateFileGDB_management(self.out_location, self.name)
# Create Feature Datasets
for ds_name, refsys in self.datasets:
arcpy.CreateFeatureDataset_management(self.path, ds_name, refsys)
# Import Feature Classes
for fc_name, f_path, dataset in self.contents:
if dataset:
if dataset not in [ds[0] for ds in self.datasets]:
raise IOError("{} not a dataset".format(dataset))
arcpy.FeatureClassToFeatureClass_conversion(
f_path, os.path.join(self.path, dataset), fc_name)
else:
arcpy.FeatureClassToFeatureClass_conversion(
f_path, self.path, fc_name)
return
class QueryFile(object):
"""Wraps RawConfigParser to make accessing stored queries easy."""
def __init__(self, path):
self.path = path
self._cfg = RawConfigParser()
self._cfg.read(self.path)
def get(self, section, option):
"""Gets the option from the section in the file."""
if option.lower() == "all":
all_qs = ["({})".format(self._cfg.get(section, opt))
for opt in self._cfg.options(section)]
q = " OR ".join(all_qs)
else:
q = self._cfg.get(section, option)
return q.replace("\n", " ")
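# A hedged sketch (section, option and file names invented) of the INI layout
# QueryFile expects and of what get() returns:
#
#   ; queries.ini
#   [residential]
#   single_family = ZONING LIKE 'R1%'
#   multi_family = ZONING LIKE 'R3%'
#
# QueryFile("queries.ini").get("residential", "single_family") returns the
# stored clause, while get("residential", "all") wraps every option in
# parentheses and ORs them together:
#   "(ZONING LIKE 'R1%') OR (ZONING LIKE 'R3%')"
# Newlines inside stored queries are replaced with spaces in both cases.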
# Source:
# https://blogs.esri.com/esri/arcgis/2013/04/23/updating-arcgis-com-hosted-feature-services-with-python/
class Service(object):
def __init__(self, mxd_file, host="My Hosted Services", con="",
service_type="FeatureServer", enable_caching=False,
allow_overwrite=True, capabilities=["Query"]):
"""Uploads an MXD as a Web Service."""
self.mxd = arcpy.mapping.MapDocument(mxd_file)
if self.mxd.title == "":
raise IOError("MXD Title (metadata) cannot be blank")
self.host = host
        if not con:
            con = self.host.upper().replace(" ", "_")
        self.con = con
self.sdd = os.path.abspath("{}.sddraft".format(self.mxd.title))
self.sd = os.path.abspath("{}.sd".format(self.mxd.title))
self.analysis = arcpy.mapping.CreateMapSDDraft(
self.mxd, self.sdd, self.mxd.title, self.con)
if self.analysis["errors"]:
raise Exception(self.analysis["errors"])
# DOM Editing
self.doc = DOM.parse(self.sdd)
self._set_service_type(service_type)
self._set_caching(enable_caching)
self._set_web_capabilities(capabilities)
self._set_overwrite(allow_overwrite)
def update_draft(self):
with open(self.sdd, "w") as f:
self.doc.writexml(f)
return
def _set_caching(self, enable_caching):
cache = str(enable_caching).lower()
conf = 'ConfigurationProperties'
configProps = self.doc.getElementsByTagName(conf)[0]
propArray = configProps.firstChild
propSets = propArray.childNodes
for propSet in propSets:
keyValues = propSet.childNodes
for keyValue in keyValues:
if keyValue.tagName == 'Key':
if keyValue.firstChild.data == "isCached":
keyValue.nextSibling.firstChild.data = cache
return
def _set_service_type(self, service_type):
typeNames = self.doc.getElementsByTagName('TypeName')
for typeName in typeNames:
if typeName.firstChild.data == "MapServer":
typeName.firstChild.data = service_type
return
def _set_web_capabilities(self, capabilities):
"""Sets the web capabilities.
Args:
capabilities (list): list of capabilities
"""
capability = ",".join(capabilities)
configProps = self.doc.getElementsByTagName('Info')[0]
propSets = configProps.firstChild.childNodes
for propSet in propSets:
keyValues = propSet.childNodes
for keyValue in keyValues:
if keyValue.tagName == 'Key':
if keyValue.firstChild.data == "WebCapabilities":
keyValue.nextSibling.firstChild.data = capability
return
def _set_overwrite(self, overwrite):
replace = "esriServiceDefinitionType_Replacement"
tagsType = self.doc.getElementsByTagName('Type')
for tagType in tagsType:
if tagType.parentNode.tagName == 'SVCManifest':
if tagType.hasChildNodes():
tagType.firstChild.data = replace
tagsState = self.doc.getElementsByTagName('State')
for tagState in tagsState:
if tagState.parentNode.tagName == 'SVCManifest':
if tagState.hasChildNodes():
tagState.firstChild.data = "esriSDState_Published"
return
def upload(self):
self.update_draft()
arcpy.StageService_server(self.sdd, self.sd)
arcpy.UploadServiceDefinition_server(
self.sd, self.host, self.mxd.title,
"", "", "", "", "OVERRIDE_DEFINITION",
"SHARE_ONLINE", "PUBLIC", "SHARE_ORGANIZATION")
return
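# A hedged usage sketch of the Service class above (the .mxd path is invented;
# nothing here runs on import). Assumes the map document has a Title set in
# its metadata, which Service requires.
def _example_publish_service():
    svc = Service(r"C:\maps\Parcels.mxd", capabilities=["Query"])
    # Writes the .sddraft, applies the DOM edits, stages the .sd and uploads.
    svc.upload()
    return svc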
def get_props(doc):
configProps = doc.getElementsByTagName('Info')[0]
propSets = configProps.firstChild.childNodes
for propSet in propSets:
keyValues = propSet.childNodes
for keyValue in keyValues:
if keyValue.tagName == 'Key':
if keyValue.firstChild.data == "WebCapabilities":
return keyValue.nextSibling.firstChild.data.split(",")
|
|
# Flask Dev Server
PORT = 5443
# Flask-Alembic imports configuration from here instead of the alembic.ini
ALEMBIC = {
'script_location': '%(here)s/alembic/versions'
}
ALEMBIC_CONTEXT = {
'render_as_batch': True, # Necessary to support SQLite ALTER on constraints
}
# Describes a static OAuth 2 Client which is the Commandment UI
OAUTH2_CLIENT_UI = {
'client_id': 'F8955645-A21D-44AE-9387-42B0800ADF15',
'client_secret': 'A',
'token_endpoint_auth_method': 'client_secret_basic',
'grant_type': 'password',
'response_type': 'token',
'scope': 'profile',
'client_name': 'Commandment UI'
}
# http://flask-sqlalchemy.pocoo.org/2.1/config/
SQLALCHEMY_DATABASE_URI = 'sqlite:///commandment/commandment.db'
# FSADeprecationWarning: SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and will be disabled by default in the future.
SQLALCHEMY_TRACK_MODIFICATIONS = False
# PLEASE! Do not take this key and use it for another product/project. It's
# only for Commandment's use. If you'd like to get your own (free!) key
# contact the mdmcert.download administrators and get your own key for your
# own project/product. We're trying to keep statistics on which products are
# requesting certs (per Apple T&C). Don't force Apple's hand and
# ruin it for everyone!
MDMCERT_API_KEY = 'b742461ff981756ca3f924f02db5a12e1f6639a9109db047ead1814aafc058dd'
PLISTIFY_MIMETYPE = 'application/xml'
# Internal CA - Certificate X.509 Attributes
INTERNAL_CA_CN = 'COMMANDMENT-CA'
INTERNAL_CA_O = 'Commandment'
# --------------
# SCEPy Defaults
# --------------
# Directory where certs, revocation lists, serials etc will be kept
SCEPY_CA_ROOT = "CA"
# X.509 Name Attributes used to generate the CA Certificate
SCEPY_CA_X509_CN = 'SCEPY-CA'
SCEPY_CA_X509_O = 'SCEPy'
SCEPY_CA_X509_C = 'US'
# Force a single certificate to be returned as a PKCS#7 Degenerate instead of raw DER data
SCEPY_FORCE_DEGENERATE_FOR_SINGLE_CERT = False
# These applications will not be shown in inventory.
IGNORED_APPLICATION_BUNDLE_IDS = [
'com.apple.MigrateAssistant',
'com.apple.keychainaccess',
'com.apple.grapher',
'com.apple.Grab',
'com.apple.ActivityMonitor',
'com.apple.backup.launcher', # Time Machine
'com.apple.TextEdit',
'com.apple.systempreferences',
'com.apple.CoreLocationAgent',
'com.apple.CaptiveNetworkAssistant',
'com.apple.CalendarFileHandler',
'com.apple.BluetoothUIServer',
'com.apple.BluetoothSetupAssistant',
'com.apple.AutomatorRunner',
'com.apple.AppleFileServer',
'com.apple.AirportBaseStationAgent',
'com.apple.AirPlayUIAgent',
'com.apple.AddressBook.UrlForwarder',
'com.apple.AVB-Audio-Configuration',
'com.apple.ScriptMonitor',
'com.apple.ScreenSaver.Engine',
'com.apple.systemevents',
'com.apple.stocks',
'com.apple.Spotlight',
'com.apple.SoftwareUpdate',
'com.apple.SocialPushAgent',
'com.apple.Siri',
'com.apple.screencapturetb',
'com.apple.rcd',
'com.apple.CloudKit.ShareBear',
'com.apple.cloudphotosd',
'com.apple.wifi.WiFiAgent',
'com.apple.weather',
'com.apple.VoiceOver',
'com.apple.UserNotificationCenter',
'com.apple.UnmountAssistantAgent',
'com.apple.UniversalAccessControl',
'com.apple.Ticket-Viewer',
'com.apple.ThermalTrap',
'com.apple.systemuiserver',
'com.apple.check_afp',
'com.apple.AddressBook.sync',
'com.apple.AddressBookSourceSync',
'com.apple.AddressBook.abd',
'com.apple.ABAssistantService',
'com.apple.FontRegistryUIAgent',
'com.apple.speech.synthesis.SpeechSynthesisServer',
'com.apple.print.PrinterProxy',
'com.apple.StorageManagementLauncher',
'com.apple.Terminal',
'com.apple.PhotoBooth',
'com.apple.mail',
'com.apple.notificationcenter.widgetsimulator',
'com.apple.quicklook.ui.helper',
'com.apple.quicklook.QuickLookSimulator',
'com.apple.QuickLookDaemon32',
'com.apple.QuickLookDaemon',
'com.apple.syncserver',
'com.apple.WebKit.PluginHost',
'com.apple.AirScanScanner',
'com.apple.MakePDF',
'com.apple.BuildWebPage',
'com.apple.VIM-Container',
'com.apple.TrackpadIM-Container',
'com.apple.inputmethod.Tamil',
'com.apple.TCIM-Container',
'com.apple.exposelauncher',
'com.apple.iChat',
'com.apple.Maps',
'com.apple.launchpad.launcher',
'com.apple.FaceTime',
'com.apple.Dictionary',
'com.apple.dashboardlauncher',
'com.apple.DVDPlayer',
'com.apple.Chess',
'com.apple.iCal',
'com.apple.calculator',
'com.apple.Automator',
'com.apple.KIM-Container',
'com.apple.CharacterPaletteIM',
'com.apple.inputmethod.AssistiveControl',
'com.apple.VirtualScanner',
'com.apple.Type8Camera',
'com.apple.loginwindow',
'com.apple.SetupAssistant',
'com.apple.PhotoLibraryMigrationUtility',
'com.apple.notificationcenterui',
'com.apple.ManagedClient',
'com.apple.helpviewer',
'com.apple.finder.Open-iCloudDrive',
'com.apple.finder.Open-Recents',
'com.apple.finder.Open-Network',
'com.apple.finder.Open-Computer',
'com.apple.finder.Open-AllMyFiles',
'com.apple.finder.Open-AirDrop',
'com.apple.finder',
'com.apple.dock',
'com.apple.coreservices.uiagent',
'com.apple.controlstrip',
'com.apple.CertificateAssistant',
'com.apple.wifi.diagnostics',
'com.apple.SystemImageUtility',
'com.apple.RAIDUtility',
'com.apple.NetworkUtility',
'com.apple.FolderActionsSetup',
'com.apple.DirectoryUtility',
'com.apple.AboutThisMacLauncher',
'com.apple.AppleScriptUtility',
'com.apple.AppleGraphicsWarning',
'com.apple.print.add',
'com.apple.archiveutility',
'com.apple.appstore',
'com.apple.Console',
'com.apple.bootcampassistant',
'com.apple.BluetoothFileExchange',
'com.apple.siri.launcher',
'com.apple.reminders',
'com.apple.QuickTimePlayerX',
'com.apple.Image_Capture',
'com.apple.accessibility.universalAccessAuthWarn',
'com.apple.accessibility.universalAccessHUD',
'com.apple.accessibility.DFRHUD',
'com.apple.syncservices.syncuid',
'com.apple.syncservices.ConflictResolver',
'com.apple.STMFramework.UIHelper',
'com.apple.speech.SpeechRecognitionServer',
'com.apple.speech.SpeechDataInstallerd',
'com.apple.ScreenReaderUIServer',
'com.apple.PubSubAgent',
'com.apple.nbagent',
'com.apple.soagent',
'com.apple.imtransferservices.IMTransferAgent',
'com.apple.IMAutomaticHistoryDeletionAgent',
'com.apple.imagent',
'com.apple.imavagent',
'com.apple.idsfoundation.IDSRemoteURLConnectionAgent',
'com.apple.identityservicesd',
'com.apple.FindMyMacMessenger',
'com.apple.Family',
'com.apple.familycontrols.useragent',
'com.apple.eap8021x.eaptlstrust',
'com.apple.frameworks.diskimages.diuiagent',
'com.apple.FollowUpUI',
'com.apple.CCE.CIMFindInputCode',
'com.apple.cmfsyncagent',
'com.apple.storeuid',
'com.apple.lateragent',
'com.apple.bird', # iCloud Drive
'com.apple.AskPermissionUI',
'com.apple.Calibration-Assistant',
'com.apple.AccessibilityVisualsAgent',
'com.apple.AOSPushRelay',
'com.apple.AOSHeartbeat',
'com.apple.AOSAlertManager',
'com.apple.iCloudUserNotificationsd',
'com.apple.SCIM-Container',
'com.apple.PAH-Container',
'com.apple.inputmethod.PluginIM',
'com.apple.KeyboardViewer',
'com.apple.PIPAgent',
'com.apple.OSDUIHelper',
'com.apple.ODSAgent',
'com.apple.OBEXAgent',
    'com.apple.NowPlayingWidgetContainer',
'com.apple.NowPlayingTouchUI',
'com.apple.NetAuthAgent',
'com.apple.MemorySlotUtility',
'com.apple.locationmenu',
'com.apple.Language-Chooser',
'com.apple.security.Keychain-Circle-Notification',
'com.apple.KeyboardSetupAssistant',
'com.apple.JavaWebStart',
'com.apple.JarLauncher',
'com.apple.Installer-Progress',
'com.apple.PackageKit.Install-in-Progress',
'com.apple.dt.CommandLineTools.installondemand',
'com.apple.imageevents',
'com.apple.gamecenter',
'com.apple.FolderActionsDispatcher',
'com.apple.ExpansionSlotUtility',
'com.apple.EscrowSecurityAlert',
'com.apple.DwellControl',
'com.apple.DiscHelper',
'com.apple.databaseevents',
'com.apple.ColorSyncCalibrator',
'com.apple.print.AirScanLegacyDiscovery',
'com.apple.ScriptEditor.id.image-file-processing-droplet-template',
'com.apple.ScriptEditor.id.file-processing-droplet-template',
'com.apple.ScriptEditor.id.droplet-with-settable-properties-template',
'com.apple.ScriptEditor.id.cocoa-applet-template',
'com.apple.inputmethod.Ainu',
'com.apple.50onPaletteIM',
'com.apple.AutoImporter',
'com.apple.Type5Camera',
'com.apple.Type4Camera',
'com.apple.PTPCamera',
'com.apple.MassStorageCamera',
'com.apple.imautomatichistorydeletionagent',
'com.apple.SyncServices.AppleMobileSync',
'com.apple.SyncServices.AppleMobileDeviceHelper',
'com.apple.coreservices.UASharedPasteboardProgressUI',
'com.apple.SummaryService',
'com.apple.ImageCaptureService',
'com.apple.ChineseTextConverterService',
'com.apple.Pass-Viewer',
'com.apple.PowerChime',
'com.apple.ProblemReporter',
'com.apple.pluginIM.pluginIMRegistrator',
'com.apple.ReportPanic',
'com.apple.RemoteDesktopAgent',
'com.apple.RapportUIAgent',
'com.apple.MRT',
'com.apple.AirPortBaseStationAgent',
'com.apple.appstore.AppDownloadLauncher',
'com.apple.appleseed.FeedbackAssistant',
'com.apple.ScreenSharing',
'com.apple.FirmwareUpdateHelper',
'com.apple.SecurityFixer',
'com.apple.ZoomWindow.app',
'com.apple.IMServicePlugInAgent',
'com.apple.itunes.connect.ApplicationLoader',
'com.apple.DiskImageMounter',
'com.apple.NetworkDiagnostics',
'com.apple.installer',
'com.apple.VoiceOverQuickstart',
]
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import six
from webob import exc
from heat.api.openstack.v1 import util
from heat.common.i18n import _
from heat.common import identifier
from heat.common import param_utils
from heat.common import serializers
from heat.common import wsgi
from heat.rpc import api as rpc_api
from heat.rpc import client as rpc_client
def format_resource(req, res, keys=None):
keys = keys or []
def include_key(k):
return k in keys if keys else True
def transform(key, value):
if not include_key(key):
return
if key == rpc_api.RES_ID:
identity = identifier.ResourceIdentifier(**value)
links = [util.make_link(req, identity),
util.make_link(req, identity.stack(), 'stack')]
nested_id = res.get(rpc_api.RES_NESTED_STACK_ID)
if nested_id:
nested_identity = identifier.HeatIdentifier(**nested_id)
links.append(util.make_link(req, nested_identity, 'nested'))
yield ('links', links)
elif (key == rpc_api.RES_STACK_NAME or
key == rpc_api.RES_STACK_ID or
key == rpc_api.RES_ACTION or
key == rpc_api.RES_NESTED_STACK_ID):
return
elif (key == rpc_api.RES_METADATA):
return
elif (key == rpc_api.RES_STATUS and rpc_api.RES_ACTION in res):
# To avoid breaking API compatibility, we join RES_ACTION
# and RES_STATUS, so the API format doesn't expose the
# internal split of state into action/status
yield (key, '_'.join((res[rpc_api.RES_ACTION], value)))
elif (key == rpc_api.RES_NAME):
yield ('logical_resource_id', value)
yield (key, value)
else:
yield (key, value)
return dict(itertools.chain.from_iterable(
transform(k, v) for k, v in res.items()))
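# Worked summary (key names taken from the code above, field values invented)
# of what format_resource does to a single engine record: the RES_ACTION
# value (e.g. 'CREATE') and the RES_STATUS value (e.g. 'COMPLETE') are joined
# into one 'CREATE_COMPLETE' status string, the action/stack/metadata keys are
# dropped, the resource name is additionally exposed as 'logical_resource_id',
# and RES_ID is replaced by 'links' entries pointing at the resource, its
# stack and, when present, its nested stack.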
class ResourceController(object):
"""WSGI controller for Resources in Heat v1 API.
Implements the API actions.
"""
# Define request scope (must match what is in policy.json or policies in
# code)
REQUEST_SCOPE = 'resource'
def __init__(self, options):
self.options = options
self.rpc_client = rpc_client.EngineClient()
def _extract_to_param(self, req, rpc_param, extractor, default):
key = rpc_param
if key in req.params:
try:
return extractor(key, req.params[key])
except ValueError as e:
raise exc.HTTPBadRequest(six.text_type(e))
else:
return default
@util.registered_identified_stack
def index(self, req, identity):
"""Lists information for all resources."""
whitelist = {
'type': 'mixed',
'status': 'mixed',
'name': 'mixed',
'action': 'mixed',
'id': 'mixed',
'physical_resource_id': 'mixed'
}
invalid_keys = (set(req.params.keys()) -
set(list(whitelist) + [rpc_api.PARAM_NESTED_DEPTH,
rpc_api.PARAM_WITH_DETAIL]))
if invalid_keys:
raise exc.HTTPBadRequest(_('Invalid filter parameters %s') %
six.text_type(list(invalid_keys)))
nested_depth = self._extract_to_param(req,
rpc_api.PARAM_NESTED_DEPTH,
param_utils.extract_int,
default=0)
with_detail = self._extract_to_param(req,
rpc_api.PARAM_WITH_DETAIL,
param_utils.extract_bool,
default=False)
params = util.get_allowed_params(req.params, whitelist)
res_list = self.rpc_client.list_stack_resources(req.context,
identity,
nested_depth,
with_detail,
filters=params)
return {'resources': [format_resource(req, res) for res in res_list]}
@util.registered_identified_stack
def show(self, req, identity, resource_name):
"""Gets detailed information for a resource."""
whitelist = {'with_attr': util.PARAM_TYPE_MULTI}
params = util.get_allowed_params(req.params, whitelist)
if 'with_attr' not in params:
params['with_attr'] = None
res = self.rpc_client.describe_stack_resource(req.context,
identity,
resource_name,
**params)
return {'resource': format_resource(req, res)}
@util.registered_identified_stack
def metadata(self, req, identity, resource_name):
"""Gets metadata information for a resource."""
res = self.rpc_client.describe_stack_resource(req.context,
identity,
resource_name)
return {rpc_api.RES_METADATA: res[rpc_api.RES_METADATA]}
@util.registered_identified_stack
def signal(self, req, identity, resource_name, body=None):
self.rpc_client.resource_signal(req.context,
stack_identity=identity,
resource_name=resource_name,
details=body)
@util.registered_identified_stack
def mark_unhealthy(self, req, identity, resource_name, body):
"""Mark a resource as healthy or unhealthy."""
data = dict()
VALID_KEYS = (RES_UPDATE_MARK_UNHEALTHY, RES_UPDATE_STATUS_REASON) = (
'mark_unhealthy', rpc_api.RES_STATUS_DATA)
invalid_keys = set(body) - set(VALID_KEYS)
if invalid_keys:
raise exc.HTTPBadRequest(_("Invalid keys in resource "
"mark unhealthy %s") % invalid_keys)
if RES_UPDATE_MARK_UNHEALTHY not in body:
raise exc.HTTPBadRequest(
_("Missing mandatory (%s) key from mark unhealthy "
"request") % RES_UPDATE_MARK_UNHEALTHY)
try:
data[RES_UPDATE_MARK_UNHEALTHY] = param_utils.extract_bool(
RES_UPDATE_MARK_UNHEALTHY,
body[RES_UPDATE_MARK_UNHEALTHY])
except ValueError as e:
raise exc.HTTPBadRequest(six.text_type(e))
data[RES_UPDATE_STATUS_REASON] = body.get(RES_UPDATE_STATUS_REASON, "")
self.rpc_client.resource_mark_unhealthy(req.context,
stack_identity=identity,
resource_name=resource_name,
**data)
def create_resource(options):
"""Resources resource factory method."""
deserializer = wsgi.JSONRequestDeserializer()
serializer = serializers.JSONResponseSerializer()
return wsgi.Resource(ResourceController(options), deserializer, serializer)
|
|
"""
Extract info from IMDB and optionally upload it to ElasticSearch
"""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import io
import pprint
import re
import sys
from operator import itemgetter
import elasticsearch
import elasticsearch.helpers
import imdb
import requests
import networking
import private
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--update-elasticsearch", default=False, action="store_true")
parser.add_argument("--no-other-roles", default=False, action="store_true")
parser.add_argument("--max-actors", default=0, type=int, help="Max number of actors to process (for quick tests)")
parser.add_argument("character_file", help="Output file with one character per line for slots")
parser.add_argument("actor_file", help="Output file with one actor per line for slots")
return parser.parse_args()
def find_representative_movies(movies, max_examples=5):
"""FInd the top rated movies up to max_examples"""
movies = sorted(movies, key=lambda m: m.get("rating", 0), reverse=True)
return [m for m in movies[:max_examples] if m.get("rating", 0) >= 6]
def get_aliases(character_name):
"""Break down a name like Sandor 'The Hound' Clegane into two separate names"""
alias_pattern = re.compile(r"(.*\s)['\"](.*)['\"]\s(.*)")
m = alias_pattern.match(character_name)
if m:
plain_name = m.group(1).strip() + " " + m.group(3).strip()
alias = m.group(2).strip()
return [plain_name, alias]
else:
return [character_name]
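# Worked example of get_aliases (name taken from the docstring above):
#   get_aliases("Sandor 'The Hound' Clegane") -> ["Sandor Clegane", "The Hound"]
# Names without a quoted nickname come back unchanged as a one-element list:
#   get_aliases("Jon Snow") -> ["Jon Snow"]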
def get_cast(actor2person, a2c_counts, c2a_counts, min_appearances, max_actors_role):
"""Generator over Person objects that meet the min number of appearances, filtering any that appear in roles played by lots of actors"""
for actor, character_counts in a2c_counts.items():
if len(character_counts) > 1:
print("{} has played multiple roles: {}".format(actor, character_counts.items()))
for character, actor_counts in c2a_counts.items():
if sum(actor_counts.values()) < min_appearances:
continue
if len(actor_counts) > max_actors_role:
print("Skipping {} with {} actors".format(character, len(actor_counts)))
continue
for actor in actor_counts.keys():
yield actor2person[actor]
def save_slot_values(filename, values):
with io.open(filename, "w", encoding="UTF-8") as out:
for house in sorted(values):
out.write(house)
out.write("\n")
def main():
args = parse_args()
# GoT id: 0944947
ia = imdb.IMDb()
actor2char = dict()
char2actor = dict()
actor2known_for = dict()
show = ia.search_movie("Game of Thrones")[0]
# fetch the detail page info
ia.update(show, "episodes")
# print("GoT show keys", show.keys())
actor2person, a2c_counts, c2a_counts = get_full_cast(ia, show)
characters = set()
ia.update(show)
for person in get_cast(actor2person, a2c_counts, c2a_counts, min_appearances=3, max_actors_role=3):
# useful info for Person: name, currentRole, actor/actress lists other movies, canonical name
char_name = unicode(person.currentRole)
actor2char[person["name"]] = char_name
char2actor[char_name] = person["name"]
# set of flat names
for character_alias in get_aliases(char_name):
characters.add(character_alias)
print(person["name"], "is", " aka ".join(get_aliases(char_name)))
# pretty slow update so it can be disabled
if not args.no_other_roles:
ia.update(person)
other_movies = person.get("actor", person.get("actress"))
if other_movies:
other_movies = [m for m in other_movies if show["title"] not in m["title"]]
update_movies(ia, other_movies)
other_movies = find_representative_movies(other_movies)
other_movies = sorted(other_movies, key=lambda m: m.get("rating", 0), reverse=True)
pprint.pprint([(m["title"], m.get("rating", 0)) for m in other_movies[:5]])
actor2known_for[person["name"]] = [m["title"] for m in other_movies[:5]]
if args.max_actors > 0 and len(actor2char) >= args.max_actors:
break
# redo the char2actor mapping with the count stats on the actors
char2actor = {c: sorted(c2a_counts[c].keys(), key=lambda a: c2a_counts[c][a], reverse=True) for c in char2actor.keys()}
# save slot-value files
save_slot_values(args.actor_file, actor2char.keys())
save_slot_values(args.character_file, characters)
if args.update_elasticsearch:
es = networking.get_elasticsearch()
# make sure the index exists
es.indices.create(index=private.ES_INDEX, ignore=400)
# delete anything of the current types
for type in ["character", "actor"]:
requests.delete("/".join([private.ES_URL, private.ES_INDEX, type]), auth=networking.get_aws_auth())
elasticsearch.helpers.bulk(es, get_character_actions(char2actor, private.ES_INDEX, "character"))
elasticsearch.helpers.bulk(es, get_actor_actions(actor2char, actor2known_for, private.ES_INDEX, "actor"))
def get_full_cast(ia, show):
"""Get the full cast of the show by iterating over all episodes"""
a2c_counts = collections.defaultdict(lambda: collections.defaultdict(int))
c2a_counts = collections.defaultdict(lambda: collections.defaultdict(int))
actor2person = dict()
for season_number, season in show["episodes"].items():
for episode_number, episode in season.items():
print("Season", season_number, "Episode", episode_number, episode.keys())
ia.update(episode)
try:
for person in episode["cast"]:
actor = person["name"]
actor2person[actor] = person
character = unicode(person.currentRole)
a2c_counts[actor][character] += 1
c2a_counts[character][actor] += 1
except KeyError:
print("Cast not available for Season {}, Episode {}".format(season_number, episode_number))
# give some sense of progress
print_cast(c2a_counts)
return actor2person, a2c_counts, c2a_counts
def print_cast(c2a_counts, min_appearances=3):
for character, actor_counts in sorted(c2a_counts.items(), key=lambda p: sum(p[1].values()), reverse=True):
if sum(actor_counts.values()) < min_appearances:
break
if character.strip():
print(character, "played by", ", ".join("{}: {}".format(k, v) for k, v in sorted(actor_counts.items(), key=itemgetter(1), reverse=True)))
def get_character_actions(char2actors, index_name, type_name, max_actors=3):
for character, actors in char2actors.items():
if len(actors) > max_actors:
print("Skipping {}, {} - shouldn't have made it here though".format(character, actors))
continue
yield {
"_op_type": "create",
"_index": index_name,
"_type": type_name,
"name": character,
"actors": actors
}
def get_actor_actions(actor2char, actor2other, index_name, type_name):
for actor in actor2char.keys():
yield {
"_op_type": "create",
"_index": index_name,
"_type": type_name,
"name": actor,
"character": actor2char[actor],
"other_roles": actor2other.get(actor)
}
def update_movies(ia, movies, max_updates=10):
"""Fill in details on other movies unless they have title substring with the current one"""
for movie in movies[:max_updates]:
ia.update(movie)
if __name__ == "__main__":
sys.exit(main())
|
|
import math
import animation
from math import sin, cos
from geometry import Point, dot_product, cross_product
from PIL import Image, ImageDraw
def planar_apply(p, f, axis):
if axis == "x":
y,z = f(Point(p.y, p.z)).tuple()
return Point(p.x, y, z)
elif axis == "y":
x,z = f(Point(p.x, p.z)).tuple()
return Point(x, p.y, z)
elif axis == "z":
x,y = f(Point(p.x, p.y)).tuple()
return Point(x, y, p.z)
else:
raise Exception("Unexpected axis {}".format(axis))
def exp(radius, angle):
return Point(radius*math.cos(angle), radius*math.sin(angle))
def rotated(p, angle):
theta = math.atan2(p.y,p.x)
theta += angle
radius = p.magnitude()
return exp(radius, theta)
def planar_rotated(p, angle, axis):
f = lambda p: rotated(p, angle)
return planar_apply(p, f, axis)
#---------------------------------------- end of point geometry stuff ----------------------------------------
def cyclic_pairs(seq):
for i in range(len(seq)):
j = (i+1) % len(seq)
yield seq[i], seq[j]
def frange(start, stop, step):
i = 0
while True:
cur = start + (step * i)
if cur > stop: break
yield cur
i += 1
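#note (illustrative, not in the original): unlike range(), frange yields the stop
#value when it is reached exactly, e.g. list(frange(0, 1, 0.25)) == [0.0, 0.25, 0.5, 0.75, 1.0]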
def dot(draw, p, radius=3, **kwargs):
kwargs.setdefault("fill", "black")
draw.chord((p.x-radius, p.y-radius, p.x+radius, p.y+radius), 0, 359, **kwargs)
def angle_3d(a,b):
return math.acos(dot_product(a,b) / (a.magnitude() * b.magnitude()))
#finds the point of intersection of line ab and segment cd.
def line_segment_intersection(a,b,c,d):
#a line can be expressed as the set of points p for which
#(p - p0) dot n = 0
#where p0 is a point on the line and n is a normal vector to the line.
#the vector equation for a line segment is
#p = f*c + (1-f)*d = f*c - f*d + d = f*(c-d) + d
#where f is a number between 0 and 1 inclusive.
#
#(f*(c-d) + d - p0) dot n = 0
#((f*(c-d)) dot n) - ((p0 - d) dot n) = 0
#((f*(c-d)) dot n) = ((p0 - d) dot n)
#f*((c-d) dot n) = ((p0 - d) dot n)
#f = ((p0 - d) dot n) / ((c-d) dot n)
p0 = a
n = exp(1, (a-b).angle() + math.radians(90))
f = dot_product(p0-d, n) / dot_product(c-d, n)
if 0 <= f <= 1:
        return (c-d)*f + d  # p = f*(c-d) + d, matching the derivation above
return None
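#worked example for the formula above (illustrative, assuming the Point vector ops
#used in this module): with the line through a=(0,0), b=(2,0) (the x axis) and the
#segment c=(1,-1), d=(1,1):
#  n = unit normal to ab          = (0,-1)
#  f = dot(a-d, n) / dot(c-d, n)  = 1 / 2 = 0.5
#  p = f*(c-d) + d                = (1, 0)
#which is where the vertical segment crosses the x axis.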
def segment_segment_intersection(a,b,c,d):
p = line_segment_intersection(a,b,c,d)
q = line_segment_intersection(c,d,a,b)
if p is None or q is None:
return None
return p
def get_bbox(poly):
return (
min(p.x for p in poly),
min(p.y for p in poly),
max(p.x for p in poly),
max(p.y for p in poly)
)
def point_in_polygon(p, poly):
bbox = get_bbox(poly)
if p.x < bbox[0] or p.y < bbox[1] or p.x > bbox[2] or p.y > bbox[3]:
return False
far_away_point = Point(-100, 0)
count = 0
for a,b in cyclic_pairs(poly):
x = segment_segment_intersection(far_away_point, p, a, b)
if x is not None:
count += 1
return count %2 == 1
def lies_behind(p, poly):
#draw a line from the viewer's position to p, and see if it intersects the plane formed by poly.
#formula courtesy of https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection
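    #concretely (illustrative note, not in the original source): for a plane with normal n
    #through p0 and the view ray p = l0 + d*l, the intersection parameter is
    #  d = dot(p0 - l0, n) / dot(l, n)
    #and only the sign of d is needed to tell front from back.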
a,b,c = poly
n = cross_product(b-a, c-a)
p0 = a
l = Point(0,0,1)
l0 = p
if dot_product(l,n) == 0:
#I think this only happens when the poly is viewed edge-on.
return False
d = dot_product(p0 - l0, n) / dot_product(l,n)
return d < 0
def flattened(p):
return Point(p.x, p.y)
def is_occluded_by(p, poly):
p = flattened(p)
poly = map(flattened, poly)
return point_in_polygon(p, poly)
def make_frame(f):
def get_torus_point(big_r, little_r, theta, iota):
return Point(
(big_r + little_r * cos(theta)) * cos(iota),
(big_r + little_r * cos(theta)) * sin(iota),
little_r*sin(theta)
)
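    #standard torus parametrisation (as computed above): with major radius R=big_r and
    #minor radius r=little_r,
    #  x = (R + r*cos(theta)) * cos(iota)
    #  y = (R + r*cos(theta)) * sin(iota)
    #  z = r*sin(theta)
    #theta runs around the tube, iota around the ring.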
big_r = 0.66
little_r = 0.33
points = []
normals = []
slices = []
minor_resolution = 16
major_resolution = 32
minor_offset = -f*2*math.pi/minor_resolution
major_offset = f*2*math.pi/major_resolution
for theta in frange(minor_offset, minor_offset+2*math.pi, 2*math.pi / minor_resolution):
slice = []
for iota in frange(major_offset, major_offset+2*math.pi, 2*math.pi / major_resolution):
adjusted_iota = iota + theta/major_resolution
p = get_torus_point(big_r, little_r, theta, adjusted_iota)
shell_p = get_torus_point(big_r, little_r+1, theta, adjusted_iota)
normal = shell_p - p
points.append(p)
normals.append(normal)
slice.append(p)
slices.append(slice)
polys = []
for slice_a, slice_b in cyclic_pairs(slices):
for (a,b),(c,d) in zip(cyclic_pairs(slice_a), cyclic_pairs(slice_b)):
polys.append((a,b,c))
polys.append((d,b,c))
size = 800
margin = 50
to_screen = lambda s: int(((s+1)/2.0)*(size-margin*2)) + margin
rotations = [
(math.radians(-45), "x"),
(math.radians(45), "y")
]
for angle, axis in rotations:
points = [planar_rotated(p, angle, axis) for p in points]
normals = [planar_rotated(p, angle, axis) for p in normals]
polys = [[planar_rotated(p, angle, axis) for p in poly ] for poly in polys]
img = Image.new("RGB", (size, size), "white")
draw = ImageDraw.Draw(img)
dot_size_resolution = 6
dot_size = lambda z: dot_size_resolution-math.floor(((z + 1)/2)*dot_size_resolution)
for idx, (p,n) in enumerate(zip(points, normals)):
#print "{} / {}\r".format(idx, len(points)),
#backface culling
angle = angle_3d(n, Point(0,0,1))
if angle < math.radians(90): continue
#occlusion culling
# if any(lies_behind(p, poly) and not any(p == vertex for vertex in poly) and is_occluded_by(p, poly) for poly in polys ):
# continue
#0 for points directly facing the camera, 1 for ones perpendicular
magnitude = (angle - math.radians(180)) / math.radians(-90)
#magnitude = magnitude ** 1/3.0
x = int(255*magnitude)
color = "#{:02x}{:02x}{:02x}".format(x,x,x)
dot(draw, p.map(to_screen), dot_size(p.z), fill=color)
draw_edges = False
if draw_edges:
for poly in polys:
for a,b in cyclic_pairs(poly):
draw.line(flattened(a.map(to_screen)).tuple() + flattened(b.map(to_screen)).tuple(), fill="black")
return img.resize((size/2, size/2), Image.ANTIALIAS)
images = []
frames = 40
for i in range(frames):
print i
images.append(make_frame(float(i)/frames))
animation.make_gif(images)
#make_frame(0).save("output.png")
|
|
#############################################################################
#
# $Id: protocol.py,v 2.94.2.38 2009/12/06 17:16:47 irmen Exp $
# Pyro Protocol Adapters
#
# This is part of "Pyro" - Python Remote Objects
# which is (c) Irmen de Jong - [email protected]
#
#############################################################################
import socket, struct, os, time, sys, hmac, types, random, errno, select
import imp, marshal, new, __builtin__
try:
import hashlib
md5=hashlib.md5
except ImportError:
import md5
md5=md5.md5
import Pyro.constants, Pyro.util
from Pyro.errors import *
from Pyro.errors import _InternalNoModuleError
pickle = Pyro.util.getPickle()
Log = Pyro.util.Log
if Pyro.util.supports_multithreading():
from threading import Thread,currentThread
_has_threading = 1
else:
_has_threading = 0
if Pyro.util.supports_compression():
import zlib
_has_compression = 1
else:
_has_compression = 0
try:
from M2Crypto import SSL
from M2Crypto.SSL import SSLError
if _has_threading:
import M2Crypto
M2Crypto.threading.init()
except ImportError:
class SSLError(Exception): pass
#------ Get the hostname (possibly of other machines) (returns None on error)
def getHostname(ip=None):
try:
if ip:
(hn,alias,ips) = socket.gethostbyaddr(ip)
return hn
else:
return socket.gethostname()
except socket.error:
return None
#------ Get IP address (return None on error)
def getIPAddress(host=None):
try:
return socket.gethostbyname(host or getHostname())
except socket.error:
return None
#------ Socket helper functions for sending and receiving data correctly.
# process optional timeout on socket.
# notice the check for M2Crypto SSL sockets: if there's data pending,
# a select on them will fail. So we avoid calling select in that case.
def _sock_timeout_send(sock, timeout):
if timeout and (not hasattr(sock,'pending') or sock.pending()==0):
r,w,e=safe_select([],[sock],[],timeout)
if not w:
raise TimeoutError('connection timeout sending')
def _sock_timeout_recv(sock, timeout):
if timeout and (not hasattr(sock,'pending') or sock.pending()==0):
r,w,e=safe_select([sock],[],[],timeout)
if not r:
raise TimeoutError('connection timeout receiving')
# Receive a precise number of bytes from a socket. Raises the
# ConnectionClosedError if that number of bytes was not available.
# (the connection has probably been closed then).
# Never will this function return an empty message (if size>0).
# We need this because 'recv' isn't guaranteed to return all desired
# bytes in one call, for instance, when network load is high.
# Use a list of all chunks and join at the end: faster!
# Handle EINTR states (interrupted system call) by just retrying.
def sock_recvmsg(sock, size, timeout=0):
while True:
try:
return _recv_msg(sock,size,timeout)
except socket.timeout:
raise TimeoutError("connection timeout receiving")
except socket.error,x:
if x.args[0] == errno.EINTR or (hasattr(errno, 'WSAEINTR') and x.args[0] == errno.WSAEINTR):
# interrupted system call, just retry
continue
raise ConnectionClosedError('connection lost: %s' % x)
except SSLError,x:
raise ConnectionClosedError('connection lost: %s' % x)
# select the optimal recv() implementation
if hasattr(socket,"MSG_WAITALL") and not Pyro.config.PYRO_BROKEN_MSGWAITALL:
def _recv_msg(sock,size,timeout):
_sock_timeout_recv(sock,timeout)
try:
chunk=sock.recv(size, socket.MSG_WAITALL) # receive all data in one call
except TypeError:
# M2Crypto sock.recv() doesn't support MSG_WAITALL parameter
return __recv_msg_compat(sock,size,timeout)
else:
if len(chunk)!=size:
err=ConnectionClosedError('connection lost')
err.partialMsg=chunk # store the message that was received until now
raise err
return chunk
else:
def _recv_msg(sock,size,timeout):
_sock_timeout_recv(sock, timeout)
return __recv_msg_compat(sock,size,timeout)
def __recv_msg_compat(sock,size,timeout): # compatibility implementation for non-MSG_WAITALL / M2Crypto
msglen=0
msglist=[]
# Receive chunks of max. 60kb size:
# (rather arbitrary limit, but it avoids memory/buffer problems on certain OSes -- VAX/VMS, Windows)
while msglen<size:
chunk=sock.recv(min(60000,size-msglen))
if not chunk:
if hasattr(sock,'pending'):
# m2crypto ssl socket - they have problems with a defaulttimeout
if socket.getdefaulttimeout() != None:
raise ConnectionClosedError("m2crypto SSL can't be used when socket.setdefaulttimeout() has been set")
err = ConnectionClosedError('connection lost')
err.partialMsg=''.join(msglist) # store the message that was received until now
raise err
msglist.append(chunk)
msglen+=len(chunk)
return ''.join(msglist)
# Send a message over a socket. Raises ConnectionClosedError if the msg
# couldn't be sent (the connection has probably been lost then).
# We need this because 'send' isn't guaranteed to send all desired
# bytes in one call, for instance, when network load is high.
def sock_sendmsg(sock,msg,timeout=0):
try:
_sock_timeout_send(sock,timeout)
sock.sendall(msg)
except socket.error:
raise ConnectionClosedError('connection lost')
# set socket option to try to re-use a server port if possible
def set_reuse_addr(sock):
if os.name not in ('nt','dos','ce') and sys.platform!='cygwin':
# only do this on a non-windows platform. Windows screws things up with REUSEADDR...
try:
sock.setsockopt ( socket.SOL_SOCKET, socket.SO_REUSEADDR,
sock.getsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1)
except:
pass
# set socket option to enable timeout checking for server sockets.
def set_sock_keepalive(sock):
if Pyro.config.PYRO_SOCK_KEEPALIVE:
try:
sock.setsockopt ( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1 )
except:
Pyro.config.PYRO_SOCK_KEEPALIVE=0 # it didn't work--disable keepalives.
#------ PYRO: adapter (default Pyro wire protocol)
#------ This adapter is for protocol version 5 ONLY (see PYROAdapter.version below)
# Future adapters could be downwards compatible and more flexible.
PFLG_COMPRESSED = 0x01 # protocol flag: compressed body
PFLG_CHECKSUM = 0x02 # protocol flag: checksum body
PFLG_XMLPICKLE_GNOSIS = 0x04 # protocol flag: used xml pickling (Gnosis)
class PYROAdapter(object):
headerFmt = '!4sHHlHl' # header (id, ver, hsiz,bsiz,pflags,crc)
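    # Wire layout of headerFmt ('!' = network byte order, no padding), in the pack
    # order used by createMsg() below:
    #   4s  headerID ('PYRO')    4 bytes
    #   H   protocol version     2 bytes
    #   H   header size          2 bytes
    #   l   body size            4 bytes
    #   H   protocol flags       2 bytes
    #   l   adler32 checksum     4 bytes
    # i.e. headerSize == struct.calcsize('!4sHHlHl') == 18 bytes.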
headerID = 'PYRO'
connectMSG='CONNECT'
acceptMSG= 'GRANTED'
denyMSG= 'DENIED' # must be same length as acceptMSG,
# note that a 1-character code is appended!
AUTH_CHALLENGE_SIZE = 16
headerSize = struct.calcsize(headerFmt)
version=5 # version 5 protocol
def __init__(self):
self.onewayMethods=[] # methods that should be called one-way
self.timeout=None # socket timeout
self.ident='' # connection identification
self.setNewConnectionValidator(DefaultConnValidator())
self.__getLockObjects()
def sendAccept(self, conn): # called by TCPServer
sock_sendmsg(conn.sock, self.acceptMSG, self.timeout)
def sendDeny(self, conn, reasonCode=Pyro.constants.DENIED_UNSPECIFIED): # called by TCPServer
sock_sendmsg(conn.sock, self.denyMSG+str(reasonCode)[0], self.timeout)
def __del__(self):
self.release(nolog=1)
def __getstate__(self):
# need to tweak the pickle because lock objects and conn objects can't be pickled
self.release() # cannot pickle the active connection so just release it
d=self.__dict__.copy()
del d["lock"]
del d["bindlock"]
return d
def __setstate__(self, state):
# restore the pickle state and recreate the unpickleable lock objects
self.__dict__.update(state)
self.__getLockObjects()
def __getLockObjects(self):
self.lock=Pyro.util.getLockObject()
self.bindlock=Pyro.util.getLockObject()
def recvAuthChallenge(self, conn):
ver,body,pflags = self.receiveMsg(conn)
if ver==self.version and len(body)==self.AUTH_CHALLENGE_SIZE:
return body
raise ValueError("Received version must be "+`self.version`+" and auth challenge must be exactly "+`self.AUTH_CHALLENGE_SIZE`+" bytes")
def setNewConnectionValidator(self,validator):
if not isinstance(validator, DefaultConnValidator):
raise TypeError("validator must be specialization of DefaultConnValidator")
self.newConnValidator=validator
def getNewConnectionValidator(self):
return self.newConnValidator
def bindToURI(self,URI):
# Client-side connection stuff. Use auth code from our own connValidator.
if URI.protocol not in ('PYRO', 'PYROLOC'):
Log.error('PYROAdapter','incompatible protocol in URI:',URI.protocol)
raise ProtocolError('incompatible protocol in URI')
try:
self.bindlock.acquire() # only 1 thread at a time can bind the URI
try:
self.URI=URI.clone()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((URI.address, URI.port))
conn=TCPConnection(sock,sock.getpeername())
# receive the authentication challenge string, and use that to build the actual identification string.
try:
authChallenge=self.recvAuthChallenge(conn)
except ProtocolError,x:
# check if we were denied
if hasattr(x,"partialMsg") and x.partialMsg[:len(self.denyMSG)]==self.denyMSG:
raise ConnectionDeniedError(Pyro.constants.deniedReasons[int(x.partialMsg[-1])])
else:
raise
# reply with our ident token, generated from the ident passphrase and the challenge
msg = self._sendConnect(sock,self.newConnValidator.createAuthToken(self.ident, authChallenge, conn.addr, self.URI, None) )
if msg==self.acceptMSG:
self.conn=conn
self.conn.connected=1
Log.msg('PYROAdapter','connected to',str(URI))
if URI.protocol=='PYROLOC':
self.resolvePYROLOC_URI("PYRO") # updates self.URI
elif msg[:len(self.denyMSG)]==self.denyMSG:
try:
raise ConnectionDeniedError(Pyro.constants.deniedReasons[int(msg[-1])])
except (KeyError,ValueError):
raise ConnectionDeniedError('invalid response')
except socket.error:
Log.msg('PYROAdapter','connection failed to URI',str(URI))
raise ProtocolError('connection failed')
finally:
self.bindlock.release()
def resolvePYROLOC_URI(self, newProtocol):
# This method looks up the object URI referenced by
# the PYROLOC string, and updates self.URI in place!
objectName=self.URI.objectID
Log.msg('PYROAdapter','resolving PYROLOC name: ',objectName)
# call the special Resolve method on the daemon itself:
self.URI.objectID=Pyro.constants.INTERNAL_DAEMON_GUID
result=self.remoteInvocation('ResolvePYROLOC',0,objectName)
# found it, switching to regular pyro protocol
self.URI.objectID=result
self.URI.protocol=newProtocol
def _sendConnect(self, sock, ident):
body=self.connectMSG+ident
sock_sendmsg(sock, self.createMsg(body), self.timeout)
return sock_recvmsg(sock, len(self.acceptMSG),self.timeout)
def release(self,nolog=0):
if hasattr(self,'conn'):
if not nolog:
Log.msg('PYROAdapter','releasing connection')
self.conn.close()
del self.conn
def connected(self):
if hasattr(self,'conn'):
return self.conn.connected
return 0
def rebindURI(self, tries=sys.maxint, wait=1):
t=0
while t<tries:
try:
self.bindToURI(self.URI)
return
except ProtocolError:
t+=1
if t<tries:
time.sleep(wait)
raise TimeoutError('connection lost')
def createMsg(self, body, replyflags=0):
pflgs=replyflags
if _has_compression and Pyro.config.PYRO_COMPRESSION:
before=len(body)
bz=zlib.compress(body) # default compression level
if len(bz)<before:
pflgs|=PFLG_COMPRESSED
body=bz
crc=0
if Pyro.config.PYRO_CHECKSUM and _has_compression:
crc=zlib.adler32(body)
pflgs|=PFLG_CHECKSUM
if Pyro.config.PYRO_XML_PICKLE=='gnosis':
pflgs|=PFLG_XMLPICKLE_GNOSIS
return struct.pack(self.headerFmt, self.headerID, self.version, self.headerSize, len(body), pflgs, crc) + body
def setOneway(self, methods):
self.onewayMethods.extend(methods)
def setTimeout(self, timeout):
if os.name=='java':
# don't allow the use of the timeout feature in jython because it causes unreliable behavior
raise RuntimeError("using setTimeout causes unreliable behavior in Jython")
self.timeout=timeout
def setIdentification(self, ident, munge=True):
if ident:
if munge:
self.ident=self.newConnValidator.mungeIdent(ident) # don't store ident itself.
else:
                self.ident=ident # pre-munged ident string
else:
self.ident=''
def getIdentification(self):
return self.ident
# Retrieve code from the remote peer. Works recursively.
def _retrieveCode(self, mname, level):
Log.msg("PYROAdapter","client cannot find module: "+mname)
# XXX this is nasty code, and also duplicated in core.py remote_supply_code()
if mname in sys.modules:
# module is already loaded, do nothing
# XXX how can we be sure if the module is "complete"?
# XXX because things might go wrong during the loading code below?
return
try:
# Called by the client-side to obtain code from the server-side.
# Call the special method on the server to retrieve the code.
# No need for complex exception stuff like when the server needs
# code from the client (see handleInvocation): because the server
# is a Pyro object we can actually *call* it :-)
module = self._remoteInvocationMobileCode("remote_retrieve_code",0,mname)
imp.acquire_lock() # obtain the global import lock
mname = mname.split('.')
path = ''
mod = new.module("pyro-server-context")
for m in mname:
path += '.' + m
# use already loaded modules instead of overwriting them
real_path = path[1:]
if sys.modules.has_key(real_path):
mod = sys.modules[real_path]
else:
setattr(mod, m, new.module(real_path))
mod = getattr(mod, m)
sys.modules[real_path] = mod
Log.msg('PYROAdapter','loading supplied code: ',mname)
if module[0:4] != imp.get_magic():
code = compile(module, "<downloaded>", "exec")
else:
code = marshal.loads(module[8:])
importer=None
try:
loaded = 0
# XXX probably want maxtries here...
while not loaded:
# install a custom importer to intercept any extra needed modules
# when executing the module code just obtained from the server
imp.acquire_lock()
importer = agent_import(__builtin__.__import__)
__builtin__.__import__ = importer
imp.release_lock()
try:
exec code in mod.__dict__
loaded = 1
except ImportError:
mname = importer.name
if importer is not None:
__builtin__.__import__ = importer.orig_import
importer = None
# XXX probably want maxrecursion here...
self._retrieveCode(mname, level+1)
finally:
if importer is not None:
__builtin__.__import__ = importer.orig_import
finally:
imp.release_lock() # release the global import lock
def _remoteInvocationMobileCode(self, method, flags, *args):
# special trimmed-down version for mobile code methods (no locking etc)
body=pickle.dumps((self.URI.objectID,method,flags,args),Pyro.config.PYRO_PICKLE_FORMAT)
sock_sendmsg(self.conn.sock, self.createMsg(body), self.timeout)
ver,answer,pflags = self.receiveMsg(self.conn,1)
if answer is None:
raise ProtocolError('incorrect answer received')
answer=pickle.loads(answer)
if isinstance(answer,PyroExceptionCapsule):
if isinstance(answer.excObj,_InternalNoModuleError):
# server couldn't load module, supply it
return self.processMissingModuleError(answer.excObj, method, flags, args)
else:
# we have an encapsulated exception, raise it again.
answer.raiseEx()
return answer
def remoteInvocation(self, method, flags, *args):
try:
self.lock.acquire() # only 1 thread at a time may use this connection to call a remote method
return self._remoteInvocation(method, flags, *args)
finally:
self.lock.release()
def _remoteInvocation(self, method, flags, *args):
if 'conn' not in self.__dict__.keys():
Log.msg('PYROAdapter','no connection, trying to bind again')
if 'URI' in self.__dict__.keys():
self.bindToURI(self.URI)
else:
raise ProtocolError('trying to rebind, but was never bound before')
if method in self.onewayMethods:
flags |= Pyro.constants.RIF_Oneway
body=pickle.dumps((self.URI.objectID,method,flags,args),Pyro.config.PYRO_PICKLE_FORMAT)
sock_sendmsg(self.conn.sock, self.createMsg(body), self.timeout)
if flags & Pyro.constants.RIF_Oneway:
return None # no answer required, return immediately
ver,answer,pflags = self.receiveMsg(self.conn,1) # read the server's response, send no further replies
if answer is None:
raise ProtocolError('incorrect answer received')
# Try to get the answer from the server.
# If there are import problems, try to get those modules from
# the server too (if mobile code is enabled).
if not Pyro.config.PYRO_MOBILE_CODE:
answer = pickle.loads(answer)
else:
importer=None
try:
imp.acquire_lock()
loaded = 0
# XXX maxtries here...
while not loaded:
# install a custom importer to intercept any extra needed modules
# when unpickling the answer just obtained from the server
imp.acquire_lock()
importer = agent_import(__builtin__.__import__)
__builtin__.__import__ = importer
imp.release_lock()
try:
answer = pickle.loads(answer)
loaded = 1
except ImportError:
mname = importer.name
if importer is not None:
__builtin__.__import__ = importer.orig_import
importer = None
self._retrieveCode(mname, 0)
finally:
if importer is not None:
__builtin__.__import__ = importer.orig_import
imp.release_lock()
if isinstance(answer,PyroExceptionCapsule):
if isinstance(answer.excObj,_InternalNoModuleError):
# server couldn't load the module, send it
return self.processMissingModuleError(answer.excObj, method, flags, args)
else:
# we have an encapsulated exception, raise it again.
answer.raiseEx()
return answer
def processMissingModuleError(self, errorinfo, method, flags, args):
# server couldn't load module, supply it
# XXX this code is ugly. and duplicated in remote_retrieve_code in core.py
Log.msg('PYROAdapter',"server can't load module: "+errorinfo.modulename)
try:
importmodule=new.module('-agent-import-')
mname=errorinfo.modulename
# not used: fromlist=errorinfo.fromlist
try:
exec 'import '+mname in importmodule.__dict__
except ImportError:
Log.error('PYROAdapter','Server wanted a non-existing module:',mname)
raise PyroError('Server wanted a non-existing module',mname)
m=eval('importmodule.'+mname)
bytecode=None
if hasattr(m,"_PYRO_bytecode"):
# use the bytecode that was put there earlier,
# this avoids recompiles of the source .py if we don't have .pyc bytecode available
bytecode=m._PYRO_bytecode
else:
# try to load the module's compiled source, or the real .py source if that fails.
# note that the source code (.py) is opened with universal newline mode
if not hasattr(m,"__file__"):
raise PyroError("cannot read module source code",mname)
(filebase,ext)=os.path.splitext(m.__file__)
if ext.startswith(".PY"):
exts = ( (".PYO","rb"), (".PYC","rb"), (".PY","rU") ) # uppercase
else:
exts = ( (".pyo","rb"), (".pyc","rb"), (".py","rU") ) # lowercase
for ext,mode in exts:
try:
bytecode=open(filebase+ext, mode).read()
break
except EnvironmentError:
pass
if bytecode:
Log.msg('PYROAdapter',"sending module to server: "+mname)
self._remoteInvocationMobileCode("remote_supply_code",0,mname, bytecode, self.conn.sock.getsockname())
# retry the method invocation
return self._remoteInvocation(* (method, flags)+args) # use the non-locking call
Log.error("PYROAdapter","cannot read module source code for module:", mname)
raise PyroError("cannot read module source code",mname)
finally:
del importmodule
# (private) receives a socket message, returns: (protocolver, message, protocolflags)
def receiveMsg(self,conn,noReply=0):
msg=sock_recvmsg(conn.sock, self.headerSize, self.timeout)
(hid, ver, hsiz, bsiz, pflags, crc) = struct.unpack(self.headerFmt,msg)
# store in the connection what pickle method this is
if pflags&PFLG_XMLPICKLE_GNOSIS:
conn.pflags|=PFLG_XMLPICKLE_GNOSIS
if ver!=self.version:
msg='incompatible protocol version'
Log.error('PYROAdapter',msg)
if not noReply:
# try to report error to client, but most likely the connection will terminate:
self.returnException(conn, ProtocolError(msg))
raise ProtocolError(msg)
if hid!=self.headerID or hsiz!=self.headerSize:
msg='invalid header'
Log.error('PYROAdapter',msg)
Log.error('PYROAdapter','INVALID HEADER DETAILS: ',conn,( hid, ver, hsiz, bsiz,pflags))
if not noReply:
# try to report error to client, but most likely the connection will terminate:
self.returnException(conn, ProtocolError(msg), shutdown=1)
raise ProtocolError(msg)
body=sock_recvmsg(conn.sock, bsiz, self.timeout)
if pflags&PFLG_CHECKSUM:
if _has_compression:
if crc!=zlib.adler32(body):
msg='checksum error'
Log.error('PYROAdapter',msg)
if not noReply:
self.returnException(conn, ProtocolError(msg))
raise ProtocolError(msg)
else:
raise ProtocolError('cannot perform checksum')
if pflags&PFLG_COMPRESSED:
if _has_compression:
body=zlib.decompress(body)
else:
# We received a compressed message but cannot decompress.
# Is this really a server error? We now throw an exception on the server...
raise ProtocolError('compression not supported')
return ver,body,pflags
def _unpickleRequest(self, pflags, body):
if pflags&PFLG_XMLPICKLE_GNOSIS:
if Pyro.config.PYRO_XML_PICKLE=='gnosis':
return pickle.loads(body)
else:
return Pyro.util.getXMLPickle('gnosis').loads(body)
elif Pyro.config.PYRO_XML_PICKLE:
Log.error('PYROAdapter','xml pickle required, got other pickle')
raise ProtocolError('xml pickle required, got other pickle')
else:
return pickle.loads(body)
def handleInvocation(self,daemon,conn):
ver,body,pflags = self.receiveMsg(conn)
if not body:
# something went wrong even before receiving the full message body
return
if ver!=self.version:
Log.error('PYROAdapter','incompatible protocol version')
self.returnException(conn, ProtocolError('incompatible protocol version'))
return
# Unpickle the request, which is a tuple:
# (object ID, method name, flags, (arg1,arg2,...))
importer=fromlist=None
try:
try:
# install a custom importer to intercept any extra needed modules
# when unpickling the request just obtained from the client
imp.acquire_lock()
importer=agent_import(__builtin__.__import__)
__builtin__.__import__=importer
req=self._unpickleRequest(pflags, body)
if type(req)!=tuple or len(req)!=4 or type(req[3])!=tuple:
# sanity check failed
raise ProtocolError("invalid request data format")
finally:
__builtin__.__import__=importer.orig_import
imp.release_lock()
except ImportError,x:
if Pyro.config.PYRO_MOBILE_CODE:
# return a special exception that will be processed by client;
# it will call the internal 'remote_supply_code' member
if importer:
modname=importer.name
fromlist=importer.fromlist
else:
modname = x.args[0][16:]
fromlist=None
self.returnException(conn, _InternalNoModuleError(modname,fromlist),0) # don't shutdown!
else:
Log.error('PYROAdapter','code problem with incoming object: '+str(x))
self.returnException(conn, NoModuleError(* x.args))
return
try:
# find the object in the implementation database of our daemon
o=daemon.getLocalObject(req[0])
except (KeyError, TypeError) ,x:
Log.warn('PYROAdapter','Invocation to unknown object ignored:',x)
self.returnException(conn, ProtocolError('unknown object ID'))
return
else:
# Do the invocation. We are already running in our own thread.
if req[2]&Pyro.constants.RIF_Oneway and Pyro.config.PYRO_ONEWAY_THREADED and daemon.threaded:
# received a oneway call, run this in its own thread.
thread=Thread(target=self._handleInvocation2, args=(daemon,req,pflags,conn,o))
thread.setDaemon(1) # thread must exit at program termination.
thread.localStorage=daemon.getLocalStorage() # set local storage for the new thread
thread.start()
else:
# not oneway or not in threaded mode, just do the invocation synchronously
self._handleInvocation2(daemon,req,pflags,conn,o)
def _handleInvocation2(self, daemon, req, pflags, conn, obj):
try:
flags=req[2]
importer=None
if not Pyro.config.PYRO_MOBILE_CODE:
res = obj.Pyro_dyncall(req[1],flags,req[3]) # (method,flags,args)
else:
try:
# install a custom importer to intercept any extra needed modules
# when executing the remote method. (using the data passed in by
# the client may trigger additional imports)
imp.acquire_lock()
importer=agent_import(__builtin__.__import__)
__builtin__.__import__=importer
res = obj.Pyro_dyncall(req[1],flags,req[3]) # (method,flags,args)
finally:
__builtin__.__import__=importer.orig_import
imp.release_lock()
if flags&Pyro.constants.RIF_Oneway:
return # no result, return immediately
# reply the result to the caller
if pflags&PFLG_XMLPICKLE_GNOSIS:
replyflags=PFLG_XMLPICKLE_GNOSIS
if Pyro.config.PYRO_XML_PICKLE=='gnosis':
body=pickle.dumps(res,Pyro.config.PYRO_PICKLE_FORMAT)
else:
body=Pyro.util.getXMLPickle('gnosis').dumps(res,Pyro.config.PYRO_PICKLE_FORMAT)
else:
replyflags=0
body=pickle.dumps(res,Pyro.config.PYRO_PICKLE_FORMAT)
sock_sendmsg(conn.sock, self.createMsg(body,replyflags),self.timeout)
except ImportError,ix:
if Pyro.config.PYRO_MOBILE_CODE:
# Return a special exception that will be processed by client;
# it will call the internal 'remote_supply_code' member.
# We have to use this seemingly complex way to signal the client
# to supply us some code, but it is only a proxy! We can't *call* it!
if importer:
# grab the import info from our importer
name=importer.name
fromlist=importer.fromlist
else:
# XXX the importerror sometimes doesn't contain the package :-(
name=ix.args[0][16:]
fromlist=None
Log.msg('PYROAdapter','failed to import',name)
self.returnException(conn, _InternalNoModuleError(name,fromlist),0) # don't shutdown!
else:
Log.error('PYROAdapter','code problem with incoming object: '+str(ix))
self.returnException(conn, NoModuleError(* ix.args))
except Exception:
# Handle the exception. Pass in if it was a oneway call,
# those calls don't need any response to be sent.
daemon.handleError(conn, bool(flags&Pyro.constants.RIF_Oneway))
def returnException(self, conn, exc, shutdown=1, args=None):
# return an encapsulated exception to the client
if conn.pflags&PFLG_XMLPICKLE_GNOSIS:
pic=Pyro.util.getXMLPickle('gnosis')
else:
pic=pickle
try:
body=pic.dumps(PyroExceptionCapsule(exc,args),Pyro.config.PYRO_PICKLE_FORMAT)
except Exception,x:
# hmm, pickling the exception failed... pickle the string instead
body=pic.dumps(PyroExceptionCapsule(PyroError(str(x)),args),Pyro.config.PYRO_PICKLE_FORMAT)
sock_sendmsg(conn.sock, self.createMsg(body),self.timeout)
if shutdown:
conn.close()
def handleConnection(self, conn, tcpserver):
# Server-side connection stuff. Use auth code from tcpserver's validator.
try:
# Validate the connection source (host) immediately,
# if it's ok, send authentication challenge, and read identification data to validate.
(ok,reasonCode) = tcpserver.newConnValidator.acceptHost(tcpserver,conn)
if ok:
challenge=tcpserver.newConnValidator.createAuthChallenge(tcpserver,conn)
if len(challenge)!=self.AUTH_CHALLENGE_SIZE:
raise ValueError("Auth challenge must be exactly "+`self.AUTH_CHALLENGE_SIZE`+" bytes")
sock_sendmsg(conn.sock, self.createMsg(challenge),self.timeout)
ver,body,pflags = self.receiveMsg(conn)
# only process the message if it makes a bit of sense
if ver==self.version and body.startswith(self.connectMSG):
token=body[len(self.connectMSG):]
(ok,reasonCode) = tcpserver.newConnValidator.acceptIdentification(tcpserver,conn,token,challenge)
if ok:
self.sendAccept(conn)
conn.connected=1
return 1
else:
self.sendDeny(conn,reasonCode)
else:
self.sendDeny(conn,reasonCode)
return 0
except ProtocolError:
# ignore the message if it caused protocol errors
return 0
# import wrapper class to help with importing remote modules
class agent_import(object):
def __init__(self, orig_import):
self.orig_import=orig_import
def __call__(self,name,iglobals={},ilocals={},fromlist=None, *rest, **krest):
if os.name=="java":
# workaround for odd Jython bug, iglobals and ilocals may not exist in this scope...(?!)
iglobals=vars().get("iglobals",{})
ilocals=vars().get("ilocals",{})
# save the import details:
self.name=name # note: this must be a str object
self.fromlist=fromlist
return self.orig_import(name,iglobals,ilocals,fromlist, *rest, **krest)
#
# The SSL adapter that handles SSL connections instead of regular sockets.
#
class PYROSSLAdapter(PYROAdapter):
def __init__(self):
PYROAdapter.__init__(self)
try:
from M2Crypto import SSL
except ImportError:
raise ProtocolError('SSL not available')
self.ctx = SSL.Context('sslv23')
if Pyro.config.PYROSSL_KEY:
keyfile = os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_KEY)
else:
keyfile = None
self.ctx.load_cert(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CERT),
keyfile)
self.ctx.load_client_ca(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
self.ctx.load_verify_info(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
self.ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert,10)
self.ctx.set_allow_unknown_ca(1)
Log.msg('PYROSSLAdapter','SSL Context initialized')
def setTimeout(self, timeout):
PYROAdapter.setTimeout(self, timeout)
def bindToURI(self,URI):
if URI.protocol not in ('PYROSSL','PYROLOCSSL'):
Log.error('PYROSSLAdapter','incompatible protocol in URI:',URI.protocol)
raise ProtocolError('incompatible protocol in URI')
try:
self.URI=URI.clone()
sock = SSL.Connection(self.ctx,socket.socket(socket.AF_INET, socket.SOCK_STREAM))
if not Pyro.config.PYROSSL_POSTCONNCHECK:
sock.postConnectionCheck=None
sock.connect((URI.address, URI.port))
conn=TCPConnection(sock, sock.getpeername())
# receive the authentication challenge string, and use that to build the actual identification string.
authChallenge=self.recvAuthChallenge(conn)
# reply with our ident token, generated from the ident passphrase and the challenge
msg = self._sendConnect(sock,self.newConnValidator.createAuthToken(self.ident, authChallenge, conn.addr, self.URI, None) )
if msg==self.acceptMSG:
self.conn=conn
self.conn.connected=1
Log.msg('PYROSSLAdapter','connected to',str(URI))
if URI.protocol=='PYROLOCSSL':
self.resolvePYROLOC_URI("PYROSSL") # updates self.URI
elif msg[:len(self.denyMSG)]==self.denyMSG:
try:
raise ConnectionDeniedError(Pyro.constants.deniedReasons[int(msg[-1])])
except (KeyError,ValueError):
raise ConnectionDeniedError('invalid response')
except socket.error:
Log.msg('PYROSSLAdapter','connection failed to URI',str(URI))
raise ProtocolError('connection failed')
def _sendConnect(self, sock, ident):
return PYROAdapter._sendConnect(self, sock, ident)
def getProtocolAdapter(protocol):
if protocol in ('PYRO', 'PYROLOC'):
return PYROAdapter()
elif protocol in ('PYROSSL', 'PYROLOCSSL'):
return PYROSSLAdapter()
else:
Log.error('getProtocolAdapter','unsupported protocol:',protocol)
raise ProtocolError('unsupported protocol')
#-------- TCPConnection object for TCPServer class
class TCPConnection(object):
def __init__(self, sock, addr):
self.sock = sock
set_sock_keepalive(self.sock) # enable tcp/ip keepalive on this socket
self.addr = addr
self.connected=0 # connected?
self.pflags=0 # protocol flags
def __del__(self):
self.close()
def fileno(self):
return self.sock.fileno()
def close(self):
#self.sock.makefile().flush()
self.sock.close()
self.connected=0
def shutdown(self):
#self.sock.makefile().flush()
self.sock.shutdown(2) # no further send/receives
def __str__(self):
return 'TCPConnection with '+str(self.addr)+' connected='+str(self.connected)
#-------- The New Connection Validators:
#-------- DefaultConnValidator checks max number of connections & identification
#-------- and ident check is done using hmac-md5 secure hash of passphrase+challenge.
#-------- Contains client- & server-side auth code.
class DefaultConnValidator(object):
def __init__(self):
self.setAllowedIdentifications(None) # default=accept all (None means all!)
def acceptHost(self,daemon,connection):
if len(daemon.connections)>=Pyro.config.PYRO_MAXCONNECTIONS:
Log.msg('DefaultConnValidator','Too many open connections, closing',connection,'#conns=',len(daemon.connections))
return (0, Pyro.constants.DENIED_SERVERTOOBUSY)
return (1,0)
def acceptIdentification(self, daemon, connection, token, challenge):
if "all" in self.allowedIDs:
return (1,0)
for authid in self.allowedIDs[:]:
if self.createAuthToken(authid, challenge, connection.addr, None, daemon) == token:
return (1,0)
Log.warn('DefaultConnValidator','connect authentication failed on conn ',connection)
return (0,Pyro.constants.DENIED_SECURITY)
def createAuthToken(self, authid, challenge, peeraddr, URI, daemon):
# Called from both client and server, is used to be able to validate the token.
# client: URI & peeraddr provided, daemon is None
# server: URI is None, peeraddr and daemon provided.
# Return hmac-md5 secure hash of our authentication phrase & the challenge.
return hmac.new(challenge, authid).digest()
def createAuthChallenge(self, tcpserver, conn):
# Server-side only, when new connection comes in.
# Challenge is secure hash of: server IP, process ID, timestamp, random value
# (NOTE: MUST RETURN EXACTLY AUTH_CHALLENGE_SIZE(=16) BYTES!)
try:
pid=os.getpid()
except:
pid=id(self) # at least jython has no getpid()
string = '%s-%d-%.20f-%.20f' %(str(getIPAddress()), pid, time.time(), random.random())
return md5(string).digest()
def mungeIdent(self, ident):
# munge the identification string into something else that's
# not easily guessed or recognised, like the md5 hash:
return md5(ident).digest()
def setAllowedIdentifications(self, ids):
if ids is not None:
if type(ids) in (types.TupleType, types.ListType):
self.allowedIDs=map(self.mungeIdent, ids) # don't store ids themselves
else:
raise TypeError("ids must be a list")
else:
self.allowedIDs=["all"] # trick: allow all incoming authentications.
#-------- basic SSL connection validator, a specialized default validator.
class BasicSSLValidator(DefaultConnValidator):
def __init__(self):
DefaultConnValidator.__init__(self)
def acceptHost(self,daemon,connection):
(ok,code) = DefaultConnValidator.acceptHost(self, daemon, connection)
if ok:
peercert=connection.sock.get_peer_cert()
return self.checkCertificate(peercert)
return (ok,code)
def checkCertificate(self,cert):
# do something interesting with the cert here, in a subclass :)
if cert is None:
return (0,Pyro.constants.DENIED_SECURITY)
return (1,0)
#-------- Helper class for local storage.
class LocalStorage(object):
def __init__(self):
self.caller=None
#-------- TCPServer base class
class TCPServer(object):
def __init__(self, port, host='', threaded=_has_threading,prtcol='PYRO'):
self._ssl_server = 0
self.connections = [] # connection threads
self.initTLS=lambda tls: None # default do-nothing func
if host:
socket.gethostbyname(host) # validate hostname
try:
if prtcol=='PYROSSL':
try:
from M2Crypto import SSL
except ImportError:
raise ProtocolError('SSL not available')
try:
self.ctx = SSL.Context('sslv23')
if Pyro.config.PYROSSL_KEY:
keyfile = os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_KEY)
else:
keyfile = None
self.ctx.load_cert(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CERT),
keyfile)
self.ctx.load_client_ca(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
self.ctx.load_verify_info(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
self.ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert,10)
self.ctx.set_allow_unknown_ca(1)
self._ssl_server = 1
Log.msg('TCPServer','SSL Context initialized')
except:
Log.warn('TCPServer','SSL Context could not be initialized !!!')
self.setNewConnectionValidator(BasicSSLValidator())
else:
self.setNewConnectionValidator(DefaultConnValidator())
# create server socket for new connections
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_reuse_addr(self.sock)
self.sock.bind((host,port))
self.sock.listen(Pyro.config.PYRO_TCP_LISTEN_BACKLOG)
if self._ssl_server:
self.sock = SSL.Connection(self.ctx,self.sock) # wrap server socket as SSL socket
# rest of members
self.threaded = threaded
self.mustShutdown=0 # global shutdown
self.localStorage=LocalStorage() # TLS for systems that don't have threads
return
except socket.error,msg:
raise ProtocolError(msg)
Log.msg('TCPServer','initialized')
def __del__(self):
self.closedown(nolog=1)
def setInitTLS(self, initTLS):
if not callable(initTLS):
raise TypeError("initTLS must be callable object")
self.initTLS=initTLS
# if in single thread mode, (re-)init the TLS right away.
if not Pyro.config.PYRO_MULTITHREADED:
self.initTLS(self.localStorage)
def closedown(self, nolog=0):
# explicit closedown request
if len(self.connections)>0:
if not nolog:
Log.warn('TCPServer','Shutting down but there are still',len(self.connections),'active connections')
for c in self.connections[:]:
if isinstance(c,TCPConnection):
c.close()
if isinstance(c,Thread):
c.join()
self.connections=[]
if hasattr(self,'sock'):
self.sock.close()
del self.sock
def setNewConnectionValidator(self,validator):
if not isinstance(validator, DefaultConnValidator):
raise TypeError("validator must be specialization of DefaultConnValidator")
self.newConnValidator=validator
def getNewConnectionValidator(self):
return self.newConnValidator
def connectionHandler(self, conn):
# Handle the connection and all requests that arrive on it.
# This is only called in multithreading mode.
try:
if self.getAdapter().handleConnection(conn, self):
Log.msg('TCPServer','new connection ',conn, ' #conns=',len(self.connections))
while not self.mustShutdown:
try:
if not conn.connected:
# connection has been closed in the meantime!
raise ConnectionClosedError()
self.handleInvocation(conn)
except ConnectionClosedError:
# client went away. Exit immediately
self.removeConnection(conn)
return
except (PyroExceptionCapsule, Exception):
self.handleError(conn)
else:
# log entry has already been written by newConnValidator
self.removeConnection(conn)
finally:
# exiting thread.
self._removeFromConnectionList(None)
def _removeFromConnectionList(self, obj):
if self.threaded and currentThread:
obj=currentThread()
try:
self.connections.remove(obj)
except ValueError:
pass
# this is the preferred way of dealing with the request loop.
def requestLoop(self, condition=lambda:1, timeout=3, others=[], callback=None):
while condition() and not self.mustShutdown:
self.handleRequests(timeout,others,callback)
def handleRequests(self, timeout=None, others=[], callback=None):
if others and not callback:
raise ProtocolError('callback required')
if self.threaded:
self._handleRequest_Threaded(timeout,others,callback)
else:
self._handleRequest_NoThreads(timeout,others,callback)
def _handleRequest_NoThreads(self,timeout,others,callback):
# self.connections is used to keep track of TCPConnections
socklist = self.connections+[self.sock]+others
ins,outs,exs = safe_select(socklist,[],[],timeout)
if self.sock in ins:
# it was the server socket, new incoming connection
ins.remove(self.sock)
if self._ssl_server:
try:
csock, addr = self.sock.accept()
#if not Pyro.config.PYROSSL_POSTCONNCHECK:
# csock.postConnectionCheck=None
except SSL.SSLError,error:
Log.warn('TCPServer','SSL error: '+str(error))
return
else:
csock, addr = self.sock.accept()
conn=TCPConnection(csock,addr)
if self.getAdapter().handleConnection(conn, self):
Log.msg('TCPServer','new connection ',conn, ' #conns=',len(self.connections))
self.connections.append(conn)
else:
# connection denied, log entry has already been written by newConnValidator
self.removeConnection(conn)
for c in ins[0:]:
if isinstance(c,TCPConnection):
ins.remove(c)
try:
self.handleInvocation(c)
if not c.connected:
self.removeConnection(c)
except ConnectionClosedError:
# client went away.
self.removeConnection(c)
except:
self.handleError(c)
if ins and callback:
# the 'others' must have fired...
callback(ins)
# def handleInvocation(self, conn): .... abstract method (implemented in subclass)
def _handleRequest_Threaded(self,timeout,others,callback):
# self.connections is used to keep track of connection Threads
socklist = [self.sock]+others
ins,outs,exs = safe_select(socklist,[],[],timeout)
if self.sock in ins:
# it was the server socket, new incoming connection
if self._ssl_server:
try:
csock, addr = self.sock.accept()
#if not Pyro.config.PYROSSL_POSTCONNCHECK:
# csock.postConnectionCheck=None
except SSL.SSLError,error:
Log.warn('TCPServer','SSL error: '+str(error))
return
else:
csock, addr = self.sock.accept()
conn=TCPConnection(csock,addr)
thread=Thread(target=self.connectionHandler, args=(conn,))
thread.setDaemon(1) # thread must exit at program termination.
thread.localStorage=LocalStorage()
self.initTLS(thread.localStorage)
self.connections.append(thread)
thread.start()
elif callback:
# the 'others' must have fired...
callback(ins)
def getLocalStorage(self):
# return storage object for this thread.
if self.threaded:
return currentThread().localStorage
else:
return self.localStorage
# to be called if a dropped connection is detected:
def removeConnection(self, conn):
conn.close()
self._removeFromConnectionList(conn)
Log.msg('TCPServer','removed connection ',conn,' #conns=',len(self.connections))
# to be called to stop all connections and shut down.
def shutdown(self):
self.mustShutdown=1
def getAdapter(self):
raise NotImplementedError,'must be overridden to return protocol adapter'
def handleError(self,conn,onewaycall=False):
raise NotImplementedError,'must be overridden'
def getServerSockets(self):
if self.threaded:
return [self.sock]
else:
return map(lambda conn: conn.sock, self.connections)+[self.sock]
# Sometimes safe_select() raises a select.error exception with the EINTR
# errno flag set, which basically tells the caller to try again later.
# This safe_select method works around this case and indeed just tries again.
_selectfunction=select.select
if os.name=="java":
from select import cpython_compatible_select as _selectfunction
def safe_select(r,w,e,timeout=None):
while True:
try:
if timeout is not None:
return _selectfunction(r,w,e,timeout)
else:
return _selectfunction(r,w,e)
except select.error,x:
if x.args[0] == errno.EINTR or (hasattr(errno, 'WSAEINTR') and x.args[0] == errno.WSAEINTR):
pass
else:
raise
|
|
import datetime
import unittest2
import webtest
import json
import md5
import api_main
import cron_main
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.account_permissions import AccountPermissions
from consts.auth_type import AuthType
from consts.event_type import EventType
from consts.media_type import MediaType
from consts.playoff_type import PlayoffType
from models.account import Account
from models.api_auth_access import ApiAuthAccess
from models.award import Award
from models.event import Event
from models.event_team import EventTeam
from models.match import Match
from models.media import Media
from models.sitevar import Sitevar
from models.team import Team
class TestApiTrustedController(unittest2.TestCase):
def setUp(self):
self.testapp = webtest.TestApp(api_main.app)
self.cronapp = webtest.TestApp(cron_main.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_user_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
self.teams_auth = ApiAuthAccess(id='tEsT_id_0',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_TEAMS])
self.matches_auth = ApiAuthAccess(id='tEsT_id_1',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES])
self.rankings_auth = ApiAuthAccess(id='tEsT_id_2',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_RANKINGS])
self.alliances_auth = ApiAuthAccess(id='tEsT_id_3',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_ALLIANCES])
self.awards_auth = ApiAuthAccess(id='tEsT_id_4',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_AWARDS])
self.video_auth = ApiAuthAccess(id='tEsT_id_5',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.MATCH_VIDEO])
self.expired_auth = ApiAuthAccess(id='tEsT_id_6',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
expiration=datetime.datetime(year=1970, month=1, day=1))
self.owned_auth = ApiAuthAccess(id='tEsT_id_7',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
owner=ndb.Key(Account, "42"))
self.owned_auth_expired = ApiAuthAccess(id='tEsT_id_8',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
owner=ndb.Key(Account, "42"),
expiration=datetime.datetime(year=1970, month=1, day=1))
self.event_info_auth = ApiAuthAccess(id='tEsT_id_9',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_INFO])
self.event = Event(
id='2014casj',
event_type_enum=EventType.REGIONAL,
event_short='casj',
year=2014,
)
self.event.put()
def tearDown(self):
self.testbed.deactivate()
def loginUser(self, is_admin=False):
self.testbed.setup_env(
user_email="[email protected]",
user_id="42",
user_is_admin='1' if is_admin else '0',
overwrite=True)
def grantPermission(self, permission):
self.account = Account(id="42", permissions=[permission])
self.account.put()
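    # Note on the signature scheme exercised by the tests below: X-TBA-Auth-Sig is the
    # hex MD5 digest of secret + request path + request body. A purely illustrative
    # helper (not part of the original test file) would be:
    #   def compute_sig(secret, path, body):
    #       return md5.new('{}{}{}'.format(secret, path, body)).hexdigest()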
def test_auth(self):
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_path_caps_key = '/api/trusted/v1/event/2014CASJ/matches/update'
# Fail
response = self.testapp.post(request_path, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail
request_body = json.dumps([])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
self.rankings_auth.put()
self.matches_auth.put()
# Pass
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Pass; all caps key
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path_caps_key, request_body)).hexdigest()
response = self.testapp.post(request_path_caps_key, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Fail; bad X-TBA-Auth-Id
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'badTestAuthId', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad sig
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': '123abc'}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad sig due to wrong body
body2 = json.dumps([{}])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, body2, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad event
request_path2 = '/api/trusted/v1/event/2014cama/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path2, request_body)).hexdigest()
response = self.testapp.post(request_path2, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; insufficient auth_types_enum
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; expired keys
self.expired_auth.put()
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_6', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
def test_admin_auth(self):
# Ensure that a logged in admin user can access any event
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.loginUser(is_admin=True)
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_auth(self):
# Ensure that a logged in user can use auths granted to their account
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.owned_auth.put()
self.loginUser()
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_expired_auth(self):
# Ensure that an expired auth granted to a user's account cannot be used
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.owned_auth_expired.put()
self.loginUser()
# Should end up with a 400 error because the expired key didn't count and no explicit
# Auth-Id header was passed
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
def test_user_permission(self):
self.loginUser()
self.grantPermission(AccountPermissions.OFFSEASON_EVENTWIZARD)
# This should only work for current year offseasons
self.event.year = datetime.datetime.now().year
self.event.event_type_enum = EventType.OFFSEASON
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_permission_fail_not_current_year(self):
self.loginUser()
self.grantPermission(AccountPermissions.OFFSEASON_EVENTWIZARD)
# This should only work for current year offseasons
self.event.year = 2012 # Unless this runs in a time machine...
self.event.event_type_enum = EventType.OFFSEASON
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_user_permission_fail_not_offseason_event(self):
self.loginUser()
self.grantPermission(AccountPermissions.OFFSEASON_EVENTWIZARD)
# This should only work for current year offseasons
self.event.year = datetime.datetime.now().year
self.event.event_type_enum = EventType.REGIONAL
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_user_permission_fail_not_granted(self):
self.loginUser()
# This should only work for current year offseasons
self.event.year = datetime.datetime.now().year
self.event.event_type_enum = EventType.OFFSEASON
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_killswitch(self):
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
# Pass
self.matches_auth.put()
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Now, set the disable sitevar
trusted_sitevar = Sitevar(
id='trustedapi',
values_json=json.dumps({
3: False,
})
)
trusted_sitevar.put()
# Fail
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
def test_alliance_selections_update(self):
self.alliances_auth.put()
alliances = [['frc971', 'frc254', 'frc1662'],
['frc1678', 'frc368', 'frc4171'],
['frc2035', 'frc192', 'frc4990'],
['frc1323', 'frc846', 'frc2135'],
['frc2144', 'frc1388', 'frc668'],
['frc1280', 'frc604', 'frc100'],
['frc114', 'frc852', 'frc841'],
['frc2473', 'frc3256', 'frc1868']]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(self.event.alliance_selections), 8)
for i, selection in enumerate(self.event.alliance_selections):
self.assertEqual(alliances[i], selection['picks'])
def test_empty_alliance_selections_update(self):
self.alliances_auth.put()
alliances = [['frc971', 'frc254', 'frc1662'],
['frc1678', 'frc368', 'frc4171'],
['frc2035', 'frc192', 'frc4990'],
['frc1323', 'frc846', 'frc2135'],
[], [], [], []]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(self.event.alliance_selections), 4)
for i, selection in enumerate(self.event.alliance_selections):
self.assertEqual(alliances[i], selection['picks'])
def test_awards_update(self):
self.awards_auth.put()
awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
{'name_str': 'Winner', 'team_key': 'frc604'},
{'name_str': 'Volunteer Blahblah', 'team_key': 'frc1', 'awardee': 'Bob Bobby'}]
request_body = json.dumps(awards)
request_path = '/api/trusted/v1/event/2014casj/awards/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_awards = Award.query(Award.event == self.event.key).fetch(None)
self.assertEqual(len(db_awards), 2)
self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
self.assertTrue('2014casj_5' in [a.key.id() for a in db_awards])
awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
{'name_str': 'Winner', 'team_key': 'frc604'}]
request_body = json.dumps(awards)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_awards = Award.query(Award.event == self.event.key).fetch(None)
self.assertEqual(len(db_awards), 1)
self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
def test_matches_update(self):
self.matches_auth.put()
update_request_path = '/api/trusted/v1/event/2014casj/matches/update'
delete_request_path = '/api/trusted/v1/event/2014casj/matches/delete'
delete_all_request_path = '/api/trusted/v1/event/2014casj/matches/delete_all'
# add one match
matches = [{
'comp_level': 'qm',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 25},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 26},
},
'time_string': '9:00 AM',
'time_utc': '2014-08-31T16:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 1)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
# add another match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260},
},
'time_string': '10:00 AM',
'time_utc': '2014-08-31T17:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
# add a match and delete a match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250,
'surrogates': ['frc1'],
'dqs': ['frc1', 'frc2', 'frc3']},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260,
'surrogates': [],
'dqs': []},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
keys_to_delete = ['qm1']
request_body = json.dumps(keys_to_delete)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json['keys_deleted'], ['qm1'])
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m2' in [m.key.id() for m in db_matches])
# verify match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['teams'], ['frc1', 'frc2', 'frc3'])
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.alliances['red']['surrogates'], ['frc1'])
self.assertEqual(match.alliances['red']['dqs'], ['frc1', 'frc2', 'frc3'])
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
self.assertEqual(match.alliances['blue']['teams'], ['frc4', 'frc5', 'frc6'])
self.assertEqual(match.alliances['blue']['score'], 260)
self.assertEqual(match.alliances['blue']['surrogates'], [])
self.assertEqual(match.alliances['blue']['dqs'], [])
# test delete all matches
request_body = ''
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_all_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_all_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
request_body = '2014casj'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_all_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_all_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 0)
def test_rankings_update(self):
self.rankings_auth.put()
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C'],
'rankings': [
{'team_key': 'frc254', 'rank': 1, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc971', 'rank': 2, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, 0, 10])
def test_rankings_wlt_update(self):
self.rankings_auth.put()
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
'rankings': [
{'team_key': 'frc254', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc971', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, '10-0-0', 0, 10])
def test_eventteams_update(self):
self.teams_auth.put()
team_list = ['frc254', 'frc971', 'frc604']
request_body = json.dumps(team_list)
# Insert teams into db, otherwise they won't get added (see 072058b)
Team(id='frc254', team_number=254).put()
Team(id='frc971', team_number=971).put()
Team(id='frc604', team_number=604).put()
Team(id='frc100', team_number=100).put()
request_path = '/api/trusted/v1/event/2014casj/team_list/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 3)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc604' in [et.key.id() for et in db_eventteams])
team_list = ['frc254', 'frc100']
request_body = json.dumps(team_list)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 2)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc100' in [et.key.id() for et in db_eventteams])
def test_eventteams_unknown(self):
self.teams_auth.put()
team_list = ['frc254', 'frc971', 'frc604']
request_body = json.dumps(team_list)
# Insert teams into db, otherwise they won't get added (see 072058b)
Team(id='frc254', team_number=254).put()
Team(id='frc971', team_number=971).put()
request_path = '/api/trusted/v1/event/2014casj/team_list/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 2)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc604' not in [et.key.id() for et in db_eventteams])
team_list = ['frc254', 'frc100']
request_body = json.dumps(team_list)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 1)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc100' not in [et.key.id() for et in db_eventteams])
def test_match_videos_add(self):
self.video_auth.put()
match1 = Match(
id="2014casj_qm1",
alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
comp_level="qm",
event=ndb.Key(Event, '2014casj'),
year=2014,
set_number=1,
match_number=1,
team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
youtube_videos=["abcdef"]
)
match1.put()
match2 = Match(
id="2014casj_sf1m1",
alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
comp_level="sf",
event=ndb.Key(Event, '2014casj'),
year=2014,
set_number=1,
match_number=1,
team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
)
match2.put()
match_videos = {'qm1': 'aFZy8iibMD0', 'sf1m1': 'RpSgUrsghv4'}
request_body = json.dumps(match_videos)
request_path = '/api/trusted/v1/event/2014casj/match_videos/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_5', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(set(Match.get_by_id('2014casj_qm1').youtube_videos), {'abcdef', 'aFZy8iibMD0'})
self.assertEqual(set(Match.get_by_id('2014casj_sf1m1').youtube_videos), {'RpSgUrsghv4'})
def test_event_media_add(self):
self.video_auth.put()
event = Event(
id='2014casj',
event_type_enum=EventType.REGIONAL,
event_short='casj',
year=2014,
)
event.put()
videos = ['aFZy8iibMD0']
request_body = json.dumps(videos)
request_path = '/api/trusted/v1/event/2014casj/media/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_5', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
media_key = Media.render_key_name(MediaType.YOUTUBE_VIDEO, 'aFZy8iibMD0')
media = Media.get_by_id(media_key)
self.assertIsNotNone(media)
self.assertEqual(media.media_type_enum, MediaType.YOUTUBE_VIDEO)
self.assertEqual(media.foreign_key, 'aFZy8iibMD0')
self.assertIn(ndb.Key(Event, '2014casj'), media.references)
def test_update_event_info(self):
self.event_info_auth.put()
request = {
'first_event_code': 'abc123',
'playoff_type': PlayoffType.ROUND_ROBIN_6_TEAM,
'webcasts': [{'url': 'https://youtu.be/abc123'},
{'type': 'youtube', 'channel': 'cde456'}],
'remap_teams': {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
},
'someother': 'randomstuff', # This should be ignored
}
request_body = json.dumps(request)
request_path = '/api/trusted/v1/event/2014casj/info/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig})
self.assertEqual(response.status_code, 200)
event = Event.get_by_id('2014casj')
self.assertEqual(event.first_code, 'abc123')
self.assertEqual(event.official, True)
self.assertEqual(event.playoff_type, PlayoffType.ROUND_ROBIN_6_TEAM)
webcasts = event.webcast
self.assertEqual(len(webcasts), 2)
webcast = webcasts[0]
self.assertEqual(webcast['type'], 'youtube')
self.assertEqual(webcast['channel'], 'abc123')
webcast = webcasts[1]
self.assertEqual(webcast['type'], 'youtube')
self.assertEqual(webcast['channel'], 'cde456')
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
# Test invalid remap_teams
request = {
'remap_teams': {
'frc9323': 'frc1323b', # lower case
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc9323': 'frc1323A', # "A" team
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc9323': 'frc1323BB', # Two letters
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc1323B': 'frc1323', # Mapping from B team
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'1323': 'frc1323B', # Bad starting format
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc1323': '1323B', # Bad ending format
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
def test_remapping(self):
self.event_info_auth.put()
self.matches_auth.put()
self.rankings_auth.put()
self.alliances_auth.put()
self.awards_auth.put()
request = {
'remap_teams': {
'frc1': 'frc101B',
'frc2': 'frc102B',
'frc3': 'frc102C',
'frc4': 'frc104'
},
}
request_body = json.dumps(request)
request_path = '/api/trusted/v1/event/2014casj/info/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig})
self.assertEqual(response.status_code, 200)
# Test remapped matches
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250,
'surrogates': ['frc1'],
'dqs': ['frc1', 'frc2', 'frc3']},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260,
'surrogates': [],
'dqs': []},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
request_path = '/api/trusted/v1/event/2014casj/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# verify remapped match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['teams'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.alliances['red']['surrogates'], ['frc101B'])
self.assertEqual(match.alliances['red']['dqs'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
self.assertEqual(match.alliances['blue']['teams'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(match.alliances['blue']['score'], 260)
self.assertEqual(match.alliances['blue']['surrogates'], [])
self.assertEqual(match.alliances['blue']['dqs'], [])
self.assertEqual(set(match.team_key_names), set(['frc101B', 'frc102B', 'frc102C', 'frc104', 'frc5', 'frc6']))
# Test remapped alliances
alliances = [['frc1', 'frc2', 'frc3'],
['frc4', 'frc5', 'frc6'],
['frc7', 'frc8', 'frc9']]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped rankings
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
'rankings': [
{'team_key': 'frc1', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc2', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc3', 'rank': 3, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc4', 'rank': 4, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc5', 'rank': 5, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc6', 'rank': 6, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# verify remapped alliances
self.assertEqual(len(self.event.alliance_selections), 3)
self.assertEqual(self.event.alliance_selections[0]['picks'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(self.event.alliance_selections[1]['picks'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(self.event.alliance_selections[2]['picks'], ['frc7', 'frc8', 'frc9'])
# verify remapped rankings
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '101B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[2], [2, '102B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[3], [3, '102C', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[4], [4, '104', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[5], [5, '5', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[6], [6, '6', 20, 500, 500, 200, '10-0-0', 0, 10])
# Test remapped awards
awards = [{'name_str': 'Winner', 'team_key': 'frc1'},
{'name_str': 'Winner', 'team_key': 'frc2'},
{'name_str': 'Winner', 'team_key': 'frc3'},
{'name_str': 'Volunteer Blahblah', 'team_key': 'frc4', 'awardee': 'Bob Bobby'},
{'name_str': 'Chairman\'s Blahblah', 'team_key': 'frc5'},
{'name_str': 'Finalist', 'team_key': 'frc6'}]
request_body = json.dumps(awards)
request_path = '/api/trusted/v1/event/2014casj/awards/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
for team in Award.get_by_id('2014casj_1').recipient_dict.keys():
self.assertTrue(str(team) in {'101B', '102B', '102C'})
for team in Award.get_by_id('2014casj_5').recipient_dict.keys():
self.assertTrue(str(team) in {'104'})
for team in Award.get_by_id('2014casj_0').recipient_dict.keys():
self.assertTrue(str(team) in {'5'})
for team in Award.get_by_id('2014casj_2').recipient_dict.keys():
self.assertTrue(str(team) in {'6'})
def test_remapping_after(self):
self.event_info_auth.put()
self.matches_auth.put()
self.rankings_auth.put()
self.alliances_auth.put()
self.awards_auth.put()
# Test remapped matches
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250,
'surrogates': ['frc1'],
'dqs': ['frc1', 'frc2', 'frc3']},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260,
'surrogates': [],
'dqs': []},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
request_path = '/api/trusted/v1/event/2014casj/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped alliances
alliances = [['frc1', 'frc2', 'frc3'],
['frc4', 'frc5', 'frc6'],
['frc7', 'frc8', 'frc9']]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped rankings
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
'rankings': [
{'team_key': 'frc1', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc2', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc3', 'rank': 3, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc4', 'rank': 4, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc5', 'rank': 5, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc6', 'rank': 6, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped awards
awards = [{'name_str': 'Winner', 'team_key': 'frc1'},
{'name_str': 'Winner', 'team_key': 'frc2'},
{'name_str': 'Winner', 'team_key': 'frc3'},
{'name_str': 'Volunteer Blahblah', 'team_key': 'frc4', 'awardee': 'Bob Bobby'},
{'name_str': 'Chairman\'s Blahblah', 'team_key': 'frc5'},
{'name_str': 'Finalist', 'team_key': 'frc6'}]
request_body = json.dumps(awards)
request_path = '/api/trusted/v1/event/2014casj/awards/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Set remapping
request = {
'remap_teams': {
'frc1': 'frc101B',
'frc2': 'frc102B',
'frc3': 'frc102C',
'frc4': 'frc104'
},
}
request_body = json.dumps(request)
request_path = '/api/trusted/v1/event/2014casj/info/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig})
self.assertEqual(response.status_code, 200)
# Run tasks
tasks = self.taskqueue_stub.GetTasks('admin')
for task in tasks:
self.cronapp.get(task["url"])
# verify remapped match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['teams'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.alliances['red']['surrogates'], ['frc101B'])
self.assertEqual(match.alliances['red']['dqs'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
self.assertEqual(match.alliances['blue']['teams'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(match.alliances['blue']['score'], 260)
self.assertEqual(match.alliances['blue']['surrogates'], [])
self.assertEqual(match.alliances['blue']['dqs'], [])
self.assertEqual(set(match.team_key_names), set(['frc101B', 'frc102B', 'frc102C', 'frc104', 'frc5', 'frc6']))
# verify remapped alliances
self.assertEqual(len(self.event.alliance_selections), 3)
self.assertEqual(self.event.alliance_selections[0]['picks'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(self.event.alliance_selections[1]['picks'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(self.event.alliance_selections[2]['picks'], ['frc7', 'frc8', 'frc9'])
# verify remapped rankings
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '101B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[2], [2, '102B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[3], [3, '102C', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[4], [4, '104', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[5], [5, '5', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[6], [6, '6', 20, 500, 500, 200, '10-0-0', 0, 10])
# verify remapped awards
for team in Award.get_by_id('2014casj_1').recipient_dict.keys():
self.assertTrue(str(team) in {'101B', '102B', '102C'})
for team in Award.get_by_id('2014casj_5').recipient_dict.keys():
self.assertTrue(str(team) in {'104'})
for team in Award.get_by_id('2014casj_0').recipient_dict.keys():
self.assertTrue(str(team) in {'5'})
for team in Award.get_by_id('2014casj_2').recipient_dict.keys():
self.assertTrue(str(team) in {'6'})
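

# A minimal sketch (the helper name is illustrative and not part of the
# handlers under test) of the signing scheme the tests above build by hand:
# the X-TBA-Auth-Sig header is the MD5 hex digest of the auth secret
# concatenated with the request path and the raw request body.
def compute_trusted_sig(secret, request_path, request_body):
    """Return the X-TBA-Auth-Sig value for the given secret, path, and body."""
    return md5.new('{}{}{}'.format(secret, request_path, request_body)).hexdigest()
# Example, mirroring the tests above:
#   sig = compute_trusted_sig('321tEsTsEcReT',
#                             '/api/trusted/v1/event/2014casj/matches/update',
#                             json.dumps([]))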
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GNMT attention sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import attention_model
import model_helper
from utils import misc_utils as utils
__all__ = ["GNMTModel"]
class GNMTModel(attention_model.AttentionModel):
"""Sequence-to-sequence dynamic model with GNMT attention architecture.
"""
def __init__(self,
hparams,
mode,
features,
scope=None,
extra_args=None):
self.is_gnmt_attention = (
hparams.attention_architecture in ["gnmt", "gnmt_v2"])
super(GNMTModel, self).__init__(
hparams=hparams,
mode=mode,
features=features,
scope=scope,
extra_args=extra_args)
def _build_encoder(self, hparams):
"""Build a GNMT encoder."""
if hparams.encoder_type == "uni" or hparams.encoder_type == "bi":
return super(GNMTModel, self)._build_encoder(hparams)
if hparams.encoder_type != "gnmt":
raise ValueError("Unknown encoder_type %s" % hparams.encoder_type)
# Build GNMT encoder.
num_bi_layers = 1
num_uni_layers = self.num_encoder_layers - num_bi_layers
utils.print_out("# Build a GNMT encoder")
utils.print_out(" num_bi_layers = %d" % num_bi_layers)
utils.print_out(" num_uni_layers = %d" % num_uni_layers)
source = self.features["source"]
if self.time_major:
source = tf.transpose(source)
with tf.variable_scope("encoder"):
self.encoder_emb_inp = tf.cast(
self.encoder_emb_lookup_fn(self.embedding_encoder, source),
self.dtype)
# Execute _build_bidirectional_rnn from Model class
bi_encoder_outputs, bi_encoder_state = self._build_bidirectional_rnn(
inputs=self.encoder_emb_inp,
sequence_length=self.features["source_sequence_length"],
dtype=self.dtype,
hparams=hparams,
num_bi_layers=num_bi_layers,
num_bi_residual_layers=0, # no residual connection
)
# Build unidirectional layers
encoder_state, encoder_outputs = self._build_all_encoder_layers(
bi_encoder_outputs, num_uni_layers, self.dtype, hparams)
# Pass all encoder states to the decoder
# except the first bi-directional layer
encoder_state = (bi_encoder_state[1],) + (
(encoder_state,) if num_uni_layers == 1 else encoder_state)
return encoder_outputs, encoder_state
def _build_all_encoder_layers(self, bi_encoder_outputs,
num_uni_layers, dtype, hparams):
"""Build encoder layers all at once."""
uni_cell = model_helper.create_rnn_cell(
unit_type=hparams.unit_type,
num_units=hparams.num_units,
num_layers=num_uni_layers,
num_residual_layers=self.num_encoder_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode,
single_cell_fn=self.single_cell_fn,
global_step=self.global_step)
if hparams.use_dynamic_rnn:
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
uni_cell,
bi_encoder_outputs,
dtype=dtype,
sequence_length=self.features["source_sequence_length"],
time_major=self.time_major)
else:
encoder_outputs, encoder_state = tf.contrib.recurrent.functional_rnn(
uni_cell,
bi_encoder_outputs,
dtype=dtype,
sequence_length=self.features["source_sequence_length"],
time_major=self.time_major,
use_tpu=hparams.use_tpu)
# Use the top layer for now
self.encoder_state_list = [encoder_outputs]
return encoder_state, encoder_outputs
def _build_individual_encoder_layers(self, bi_encoder_outputs,
num_uni_layers, dtype, hparams):
"""Run each of the encoder layer separately, not used in general seq2seq."""
uni_cell_lists = model_helper._cell_list(
unit_type=hparams.unit_type,
num_units=hparams.num_units,
num_layers=num_uni_layers,
num_residual_layers=self.num_encoder_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode,
single_cell_fn=self.single_cell_fn,
global_step=self.global_step)
encoder_inp = bi_encoder_outputs
encoder_states = []
self.encoder_state_list = [bi_encoder_outputs[:, :, :hparams.num_units],
bi_encoder_outputs[:, :, hparams.num_units:]]
with tf.variable_scope("rnn/multi_rnn_cell"):
for i, cell in enumerate(uni_cell_lists):
with tf.variable_scope("cell_%d" % i) as scope:
if hparams.use_dynamic_rnn:
encoder_inp, encoder_state = tf.nn.dynamic_rnn(
cell,
encoder_inp,
dtype=dtype,
sequence_length=self.features["source_sequence_length"],
time_major=self.time_major,
scope=scope)
else:
encoder_inp, encoder_state = tf.contrib.recurrent.functional_rnn(
cell,
encoder_inp,
dtype=dtype,
sequence_length=self.features["source_sequence_length"],
time_major=self.time_major,
scope=scope,
use_tpu=hparams.use_tpu)
encoder_states.append(encoder_state)
self.encoder_state_list.append(encoder_inp)
encoder_state = tuple(encoder_states)
encoder_outputs = self.encoder_state_list[-1]
return encoder_state, encoder_outputs
def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,
source_sequence_length):
"""Build a RNN cell with GNMT attention architecture."""
# Standard attention
if not self.is_gnmt_attention:
return super(GNMTModel, self)._build_decoder_cell(
hparams, encoder_outputs, encoder_state, source_sequence_length)
# GNMT attention
attention_option = hparams.attention
attention_architecture = hparams.attention_architecture
num_units = hparams.num_units
infer_mode = hparams.infer_mode
dtype = self.dtype
if self.time_major:
memory = tf.transpose(encoder_outputs, [1, 0, 2])
else:
memory = encoder_outputs
if (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode == "beam_search"):
memory, source_sequence_length, encoder_state, batch_size = (
self._prepare_beam_search_decoder_inputs(
hparams.beam_width, memory, source_sequence_length,
encoder_state))
else:
batch_size = self.batch_size
attention_mechanism = self.attention_mechanism_fn(
attention_option, num_units, memory, source_sequence_length, self.mode)
cell_list = model_helper._cell_list( # pylint: disable=protected-access
unit_type=hparams.unit_type,
num_units=num_units,
num_layers=self.num_decoder_layers,
num_residual_layers=self.num_decoder_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode,
single_cell_fn=self.single_cell_fn,
residual_fn=gnmt_residual_fn,
global_step=self.global_step)
# Only wrap the bottom layer with the attention mechanism.
attention_cell = cell_list.pop(0)
# Only generate alignment in greedy INFER mode.
alignment_history = (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode != "beam_search")
attention_cell = tf.contrib.seq2seq.AttentionWrapper(
attention_cell,
attention_mechanism,
attention_layer_size=None, # don't use attention layer.
output_attention=False,
alignment_history=alignment_history,
name="attention")
if attention_architecture == "gnmt":
cell = GNMTAttentionMultiCell(
attention_cell, cell_list)
elif attention_architecture == "gnmt_v2":
cell = GNMTAttentionMultiCell(
attention_cell, cell_list, use_new_attention=True)
else:
raise ValueError(
"Unknown attention_architecture %s" % attention_architecture)
if hparams.pass_hidden_state:
decoder_initial_state = tuple(
zs.clone(cell_state=es)
if isinstance(zs, tf.contrib.seq2seq.AttentionWrapperState) else es
for zs, es in zip(
cell.zero_state(batch_size, dtype), encoder_state))
else:
decoder_initial_state = cell.zero_state(batch_size, dtype)
return cell, decoder_initial_state
class GNMTAttentionMultiCell(tf.nn.rnn_cell.MultiRNNCell):
"""A MultiCell with GNMT attention style."""
def __init__(self, attention_cell, cells, use_new_attention=False):
"""Creates a GNMTAttentionMultiCell.
Args:
attention_cell: An instance of AttentionWrapper.
cells: A list of RNNCell wrapped with AttentionInputWrapper.
use_new_attention: Whether to use the attention generated from current
step bottom layer's output. Default is False.
"""
cells = [attention_cell] + cells
self.use_new_attention = use_new_attention
super(GNMTAttentionMultiCell, self).__init__(cells, state_is_tuple=True)
def __call__(self, inputs, state, scope=None):
"""Run the cell with bottom layer's attention copied to all upper layers."""
if not tf.contrib.framework.nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
with tf.variable_scope(scope or "multi_rnn_cell"):
new_states = []
with tf.variable_scope("cell_0_attention"):
attention_cell = self._cells[0]
attention_state = state[0]
cur_inp, new_attention_state = attention_cell(inputs, attention_state)
new_states.append(new_attention_state)
for i in range(1, len(self._cells)):
with tf.variable_scope("cell_%d" % i):
cell = self._cells[i]
cur_state = state[i]
if self.use_new_attention:
cur_inp = tf.concat([cur_inp, new_attention_state.attention], -1)
else:
cur_inp = tf.concat([cur_inp, attention_state.attention], -1)
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
return cur_inp, tuple(new_states)
def gnmt_residual_fn(inputs, outputs):
"""Residual function that handles different inputs and outputs inner dims.
Args:
inputs: cell inputs, this is actual inputs concatenated with the attention
vector.
outputs: cell outputs
Returns:
outputs + actual inputs
"""
def split_input(inp, out):
out_dim = out.get_shape().as_list()[-1]
inp_dim = inp.get_shape().as_list()[-1]
return tf.split(inp, [out_dim, inp_dim - out_dim], axis=-1)
actual_inputs, _ = tf.contrib.framework.nest.map_structure(
split_input, inputs, outputs)
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
tf.contrib.framework.nest.assert_same_structure(actual_inputs, outputs)
tf.contrib.framework.nest.map_structure(
assert_shape_match, actual_inputs, outputs)
return tf.contrib.framework.nest.map_structure(
lambda inp, out: inp + out, actual_inputs, outputs)
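

# A minimal, hedged sketch (assuming TensorFlow 1.x, as used throughout this
# module; the tensors and function name are illustrative) of what
# gnmt_residual_fn does: the attention slice appended to the cell input is
# dropped so the residual addition matches the cell output dimension.
def _gnmt_residual_fn_example():
  inp = tf.constant([[1.0, 2.0, 3.0, 10.0]])  # cell input (3 dims) + attention (1 dim)
  out = tf.constant([[0.5, 0.5, 0.5]])        # cell output (3 dims)
  res = gnmt_residual_fn(inp, out)            # adds only the first 3 dims of inp
  with tf.Session() as sess:
    print(sess.run(res))                      # -> [[1.5, 2.5, 3.5]]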
|
|
#!/usr/bin/env python
"""
Copyright (c) 2019 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import xgmii_ep
import baser_serdes_ep
module = 'eth_mac_phy_10g'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/axis_baser_tx_64.v")
srcs.append("../rtl/axis_baser_rx_64.v")
srcs.append("../rtl/eth_mac_phy_10g_rx.v")
srcs.append("../rtl/eth_mac_phy_10g_tx.v")
srcs.append("../rtl/eth_phy_10g_rx_if.v")
srcs.append("../rtl/eth_phy_10g_tx_if.v")
srcs.append("../rtl/eth_phy_10g_rx_ber_mon.v")
srcs.append("../rtl/eth_phy_10g_rx_frame_sync.v")
srcs.append("../rtl/lfsr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
DATA_WIDTH = 64
KEEP_WIDTH = (DATA_WIDTH/8)
HDR_WIDTH = (DATA_WIDTH/32)
ENABLE_PADDING = 1
ENABLE_DIC = 1
MIN_FRAME_LENGTH = 64
PTP_PERIOD_NS = 0x6
PTP_PERIOD_FNS = 0x6666
TX_PTP_TS_ENABLE = 0
TX_PTP_TS_WIDTH = 96
TX_PTP_TAG_ENABLE = TX_PTP_TS_ENABLE
TX_PTP_TAG_WIDTH = 16
RX_PTP_TS_ENABLE = 0
RX_PTP_TS_WIDTH = 96
TX_USER_WIDTH = (TX_PTP_TAG_WIDTH if TX_PTP_TAG_ENABLE else 0) + 1
RX_USER_WIDTH = (RX_PTP_TS_WIDTH if RX_PTP_TS_ENABLE else 0) + 1
BIT_REVERSE = 0
SCRAMBLER_DISABLE = 0
PRBS31_ENABLE = 1
TX_SERDES_PIPELINE = 2
RX_SERDES_PIPELINE = 2
BITSLIP_HIGH_CYCLES = 1
BITSLIP_LOW_CYCLES = 8
COUNT_125US = 125000/6.4
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
rx_clk = Signal(bool(0))
rx_rst = Signal(bool(0))
tx_clk = Signal(bool(0))
tx_rst = Signal(bool(0))
tx_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
tx_axis_tkeep = Signal(intbv(0)[KEEP_WIDTH:])
tx_axis_tvalid = Signal(bool(0))
tx_axis_tlast = Signal(bool(0))
tx_axis_tuser = Signal(intbv(0)[TX_USER_WIDTH:])
serdes_rx_data = Signal(intbv(0)[DATA_WIDTH:])
serdes_rx_hdr = Signal(intbv(1)[HDR_WIDTH:])
tx_ptp_ts = Signal(intbv(0)[TX_PTP_TS_WIDTH:])
rx_ptp_ts = Signal(intbv(0)[RX_PTP_TS_WIDTH:])
ifg_delay = Signal(intbv(0)[8:])
tx_prbs31_enable = Signal(bool(0))
rx_prbs31_enable = Signal(bool(0))
serdes_rx_data_int = Signal(intbv(0)[DATA_WIDTH:])
serdes_rx_hdr_int = Signal(intbv(1)[HDR_WIDTH:])
# Outputs
tx_axis_tready = Signal(bool(0))
rx_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
rx_axis_tkeep = Signal(intbv(0)[KEEP_WIDTH:])
rx_axis_tvalid = Signal(bool(0))
rx_axis_tlast = Signal(bool(0))
rx_axis_tuser = Signal(intbv(0)[RX_USER_WIDTH:])
serdes_tx_data = Signal(intbv(0)[DATA_WIDTH:])
serdes_tx_hdr = Signal(intbv(1)[HDR_WIDTH:])
serdes_rx_bitslip = Signal(bool(0))
tx_axis_ptp_ts = Signal(intbv(0)[TX_PTP_TS_WIDTH:])
tx_axis_ptp_ts_tag = Signal(intbv(0)[TX_PTP_TAG_WIDTH:])
tx_axis_ptp_ts_valid = Signal(bool(0))
tx_start_packet = Signal(intbv(0)[2:])
tx_error_underflow = Signal(bool(0))
rx_start_packet = Signal(intbv(0)[2:])
rx_error_count = Signal(intbv(0)[7:])
rx_error_bad_frame = Signal(bool(0))
rx_error_bad_fcs = Signal(bool(0))
rx_bad_block = Signal(bool(0))
rx_block_lock = Signal(bool(0))
rx_high_ber = Signal(bool(0))
# sources and sinks
axis_source_pause = Signal(bool(0))
serdes_source = baser_serdes_ep.BaseRSerdesSource()
serdes_source_logic = serdes_source.create_logic(
rx_clk,
tx_data=serdes_rx_data_int,
tx_header=serdes_rx_hdr_int,
name='serdes_source'
)
serdes_sink = baser_serdes_ep.BaseRSerdesSink()
serdes_sink_logic = serdes_sink.create_logic(
tx_clk,
rx_data=serdes_tx_data,
rx_header=serdes_tx_hdr,
name='serdes_sink'
)
axis_source = axis_ep.AXIStreamSource()
axis_source_logic = axis_source.create_logic(
tx_clk,
tx_rst,
tdata=tx_axis_tdata,
tkeep=tx_axis_tkeep,
tvalid=tx_axis_tvalid,
tready=tx_axis_tready,
tlast=tx_axis_tlast,
tuser=tx_axis_tuser,
pause=axis_source_pause,
name='axis_source'
)
axis_sink = axis_ep.AXIStreamSink()
axis_sink_logic = axis_sink.create_logic(
rx_clk,
rx_rst,
tdata=rx_axis_tdata,
tkeep=rx_axis_tkeep,
tvalid=rx_axis_tvalid,
tlast=rx_axis_tlast,
tuser=rx_axis_tuser,
name='axis_sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
rx_clk=rx_clk,
rx_rst=rx_rst,
tx_clk=tx_clk,
tx_rst=tx_rst,
tx_axis_tdata=tx_axis_tdata,
tx_axis_tkeep=tx_axis_tkeep,
tx_axis_tvalid=tx_axis_tvalid,
tx_axis_tready=tx_axis_tready,
tx_axis_tlast=tx_axis_tlast,
tx_axis_tuser=tx_axis_tuser,
rx_axis_tdata=rx_axis_tdata,
rx_axis_tkeep=rx_axis_tkeep,
rx_axis_tvalid=rx_axis_tvalid,
rx_axis_tlast=rx_axis_tlast,
rx_axis_tuser=rx_axis_tuser,
serdes_tx_data=serdes_tx_data,
serdes_tx_hdr=serdes_tx_hdr,
serdes_rx_data=serdes_rx_data,
serdes_rx_hdr=serdes_rx_hdr,
serdes_rx_bitslip=serdes_rx_bitslip,
tx_ptp_ts=tx_ptp_ts,
rx_ptp_ts=rx_ptp_ts,
tx_axis_ptp_ts=tx_axis_ptp_ts,
tx_axis_ptp_ts_tag=tx_axis_ptp_ts_tag,
tx_axis_ptp_ts_valid=tx_axis_ptp_ts_valid,
tx_start_packet=tx_start_packet,
tx_error_underflow=tx_error_underflow,
rx_start_packet=rx_start_packet,
rx_error_count=rx_error_count,
rx_error_bad_frame=rx_error_bad_frame,
rx_error_bad_fcs=rx_error_bad_fcs,
rx_bad_block=rx_bad_block,
rx_block_lock=rx_block_lock,
rx_high_ber=rx_high_ber,
ifg_delay=ifg_delay,
tx_prbs31_enable=tx_prbs31_enable,
rx_prbs31_enable=rx_prbs31_enable
)
@always(delay(4))
def clkgen():
clk.next = not clk
tx_clk.next = not tx_clk
rx_clk.next = not rx_clk
load_bit_offset = []
@instance
def shift_bits():
bit_offset = 0
last_data = 0
while True:
yield clk.posedge
if load_bit_offset:
bit_offset = load_bit_offset.pop(0)
if serdes_rx_bitslip:
bit_offset += 1
bit_offset = bit_offset % 66
data = int(serdes_rx_data_int) << 2 | int(serdes_rx_hdr_int)
out_data = ((last_data | data << 66) >> 66-bit_offset) & 0x3ffffffffffffffff
last_data = data
serdes_rx_data.next = out_data >> 2
serdes_rx_hdr.next = out_data & 3
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
tx_rst.next = 1
rx_rst.next = 1
yield clk.posedge
rst.next = 0
tx_rst.next = 0
rx_rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
ifg_delay.next = 12
# testbench stimulus
# wait for block lock
while not rx_block_lock:
yield clk.posedge
# dump garbage
while not axis_sink.empty():
axis_sink.recv()
yield clk.posedge
print("test 1: test rx packet")
current_test.next = 1
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(32))
test_frame.update_fcs()
axis_frame = test_frame.build_axis_fcs()
serdes_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+bytearray(axis_frame))
yield axis_sink.wait()
rx_frame = axis_sink.recv()
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis(rx_frame)
eth_frame.update_fcs()
assert eth_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: test tx packet")
current_test.next = 2
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(32))
test_frame.update_fcs()
axis_frame = test_frame.build_axis()
axis_source.send(axis_frame)
yield serdes_sink.wait()
rx_frame = serdes_sink.recv()
assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis_fcs(rx_frame.data[8:])
print(hex(eth_frame.eth_fcs))
print(hex(eth_frame.calc_fcs()))
assert len(eth_frame.payload.data) == 46
assert eth_frame.eth_fcs == eth_frame.calc_fcs()
assert eth_frame.eth_dest_mac == test_frame.eth_dest_mac
assert eth_frame.eth_src_mac == test_frame.eth_src_mac
assert eth_frame.eth_type == test_frame.eth_type
assert eth_frame.payload.data.index(test_frame.payload.data) == 0
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.sql
~~~~~~~~~~~~~~~~~~~
Lexers for various SQL dialects and related interactive sessions.
Postgres specific lexers:
`PostgresLexer`
A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL
lexer are:
- keywords and data types list parsed from the PG docs (run the
`_postgres_builtins` module to update them);
- Content of $-strings parsed using a specific lexer, e.g. the content
of a PL/Python function is parsed using the Python lexer;
- parse PG specific constructs: E-strings, $-strings, U&-strings,
different operators and punctuation.
`PlPgsqlLexer`
        A lexer for the PL/pgSQL language. Adds a few specific constructs on
top of the PG SQL lexer (such as <<label>>).
`PostgresConsoleLexer`
A lexer to highlight an interactive psql session:
        - identifies the prompt and does its best to detect the end of a command
          in multiline statements where not all the lines are prefixed by a
prompt, telling them apart from the output;
- highlights errors in the output and notification levels;
- handles psql backslash commands.
    The ``tests/examplefiles`` directory contains a few test files with data to be
parsed by these lexers.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.util import iteritems
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
PSEUDO_TYPES, PLPGSQL_KEYWORDS
__all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer',
'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'RqlLexer']
line_re = re.compile('.*?\n')
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
def language_callback(lexer, match):
"""Parse the content of a $-string using a lexer
The lexer is chosen looking for a nearby LANGUAGE.
"""
l = None
m = language_re.match(lexer.text[match.end():match.end()+100])
if m is not None:
l = lexer._get_lexer(m.group(1))
else:
m = list(language_re.finditer(
lexer.text[max(0, match.start()-100):match.start()]))
if m:
l = lexer._get_lexer(m[-1].group(1))
if l:
yield (match.start(1), String, match.group(1))
for x in l.get_tokens_unprocessed(match.group(2)):
yield x
yield (match.start(3), String, match.group(3))
else:
yield (match.start(), String, match.group())
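# Illustration (hypothetical input): in a statement such as
#     CREATE FUNCTION f() RETURNS int AS $$ return 1 $$ LANGUAGE plpythonu;
# the body between the $$ delimiters is handed to the lexer chosen from the
# trailing LANGUAGE clause (here the Python lexer, resolved via _get_lexer).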
class PostgresBase(object):
"""Base class for Postgres-related lexers.
    This is implemented as a mixin to avoid the Lexer metaclass kicking in.
    This way the different lexers don't have a common Lexer ancestor. If they
    had, _tokens could be created on this ancestor and not updated for the
    other classes, resulting e.g. in PL/pgSQL parsed as SQL. This shortcoming
    seems to suggest that regexp lexers are not really subclassable.
"""
def get_tokens_unprocessed(self, text, *args):
# Have a copy of the entire text to be used by `language_callback`.
self.text = text
for x in super(PostgresBase, self).get_tokens_unprocessed(
text, *args):
yield x
def _get_lexer(self, lang):
if lang.lower() == 'sql':
return get_lexer_by_name('postgresql', **self.options)
tries = [lang]
if lang.startswith('pl'):
tries.append(lang[2:])
if lang.endswith('u'):
tries.append(lang[:-1])
if lang.startswith('pl') and lang.endswith('u'):
tries.append(lang[2:-1])
for l in tries:
try:
return get_lexer_by_name(l, **self.options)
except ClassNotFound:
pass
else:
# TODO: better logging
# print >>sys.stderr, "language not found:", lang
return None
class PostgresLexer(PostgresBase, RegexLexer):
"""
Lexer for the PostgreSQL dialect of SQL.
.. versionadded:: 1.5
"""
name = 'PostgreSQL SQL dialect'
aliases = ['postgresql', 'postgres']
mimetypes = ['text/x-postgresql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(' + '|'.join(s.replace(" ", "\s+")
for s in DATATYPES + PSEUDO_TYPES)
+ r')\b', Name.Builtin),
(words(KEYWORDS, suffix=r'\b'), Keyword),
(r'[+*/<>=~!@#%^&|`?-]+', Operator),
(r'::', Operator), # cast
(r'\$\d+', Name.Variable),
(r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
(r'[0-9]+', Number.Integer),
(r"(E|U&)?'(''|[^'])*'", String.Single),
(r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier
(r'(?s)(\$[^$]*\$)(.*?)(\1)', language_callback),
(r'[a-z_]\w*', Name),
# psql variable in SQL
(r""":(['"]?)[a-z]\w*\b\1""", Name.Variable),
(r'[;:()\[\]{},.]', Punctuation),
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
],
}
class PlPgsqlLexer(PostgresBase, RegexLexer):
"""
    Handle the extra syntax in the PL/pgSQL language.
.. versionadded:: 1.5
"""
name = 'PL/pgSQL'
aliases = ['plpgsql']
mimetypes = ['text/x-plpgsql']
flags = re.IGNORECASE
tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens))
# extend the keywords list
for i, pattern in enumerate(tokens['root']):
if pattern[1] == Keyword:
tokens['root'][i] = (
words(KEYWORDS + PLPGSQL_KEYWORDS, suffix=r'\b'),
Keyword)
del i
break
else:
assert 0, "SQL keywords not found"
# Add specific PL/pgSQL rules (before the SQL ones)
tokens['root'][:0] = [
(r'\%[a-z]\w*\b', Name.Builtin), # actually, a datatype
(r':=', Operator),
(r'\<\<[a-z]\w*\>\>', Name.Label),
(r'\#[a-z]\w*\b', Keyword.Pseudo), # #variable_conflict
]
class PsqlRegexLexer(PostgresBase, RegexLexer):
"""
    Extend the PostgresLexer adding support specific to psql commands.
This is not a complete psql lexer yet as it lacks prompt support
and output rendering.
"""
name = 'PostgreSQL console - regexp based lexer'
aliases = [] # not public
flags = re.IGNORECASE
tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens))
tokens['root'].append(
(r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
tokens['psql-command'] = [
(r'\n', Text, 'root'),
(r'\s+', Text),
(r'\\[^\s]+', Keyword.Pseudo),
(r""":(['"]?)[a-z]\w*\b\1""", Name.Variable),
(r"'(''|[^'])*'", String.Single),
(r"`([^`])*`", String.Backtick),
(r"[^\s]+", String.Symbol),
]
re_prompt = re.compile(r'^(\S.*?)??[=\-\(\$\'\"][#>]')
re_psql_command = re.compile(r'\s*\\')
re_end_command = re.compile(r';\s*(--.*?)?$')
re_psql_command = re.compile(r'(\s*)(\\.+?)(\s+)$')
re_error = re.compile(r'(ERROR|FATAL):')
re_message = re.compile(
r'((?:DEBUG|INFO|NOTICE|WARNING|ERROR|'
r'FATAL|HINT|DETAIL|CONTEXT|LINE [0-9]+):)(.*?\n)')
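# Illustrative examples (not from the original source): re_prompt is intended
# to match interactive prompts such as "regression=# " or "template1-> ",
# while re_message picks up server messages like "NOTICE:  relation exists".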
class lookahead(object):
"""Wrap an iterator and allow pushing back an item."""
def __init__(self, x):
self.iter = iter(x)
self._nextitem = None
def __iter__(self):
return self
def send(self, i):
self._nextitem = i
return i
def __next__(self):
if self._nextitem is not None:
ni = self._nextitem
self._nextitem = None
return ni
return next(self.iter)
next = __next__
class PostgresConsoleLexer(Lexer):
"""
Lexer for psql sessions.
.. versionadded:: 1.5
"""
name = 'PostgreSQL console (psql)'
aliases = ['psql', 'postgresql-console', 'postgres-console']
mimetypes = ['text/x-postgresql-psql']
def get_tokens_unprocessed(self, data):
sql = PsqlRegexLexer(**self.options)
lines = lookahead(line_re.findall(data))
# prompt-output cycle
while 1:
# consume the lines of the command: start with an optional prompt
# and continue until the end of command is detected
curcode = ''
insertions = []
while 1:
try:
line = next(lines)
except StopIteration:
# allow the emission of partially collected items
# the repl loop will be broken below
break
# Identify a shell prompt in case of psql commandline example
if line.startswith('$') and not curcode:
lexer = get_lexer_by_name('console', **self.options)
for x in lexer.get_tokens_unprocessed(line):
yield x
break
# Identify a psql prompt
mprompt = re_prompt.match(line)
if mprompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, mprompt.group())]))
curcode += line[len(mprompt.group()):]
else:
curcode += line
# Check if this is the end of the command
# TODO: better handle multiline comments at the end with
# a lexer with an external state?
if re_psql_command.match(curcode) \
or re_end_command.search(curcode):
break
# Emit the combined stream of command and prompt(s)
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
# Emit the output lines
out_token = Generic.Output
while 1:
line = next(lines)
mprompt = re_prompt.match(line)
if mprompt is not None:
# push the line back to have it processed by the prompt
lines.send(line)
break
mmsg = re_message.match(line)
if mmsg is not None:
if mmsg.group(1).startswith("ERROR") \
or mmsg.group(1).startswith("FATAL"):
out_token = Generic.Error
yield (mmsg.start(1), Generic.Strong, mmsg.group(1))
yield (mmsg.start(2), out_token, mmsg.group(2))
else:
yield (0, out_token, line)
class SqlLexer(RegexLexer):
"""
Lexer for Structured Query Language. Currently, this lexer does
not recognize any special syntax except ANSI SQL.
"""
name = 'SQL'
aliases = ['sql']
filenames = ['*.sql']
mimetypes = ['text/x-sql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(words((
'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE',
'ALIAS', 'ALL', 'ALLOCATE', 'ALTER', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARE', 'AS',
'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT', 'ASYMMETRIC', 'AT', 'ATOMIC',
'AUTHORIZATION', 'AVG', 'BACKWARD', 'BEFORE', 'BEGIN', 'BETWEEN', 'BITVAR',
'BIT_LENGTH', 'BOTH', 'BREADTH', 'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY',
'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CATALOG_NAME', 'CHAIN',
'CHARACTERISTICS', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG',
'CHARACTER_SET_NAME', 'CHARACTER_SET_SCHEMA', 'CHAR_LENGTH', 'CHECK',
'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE', 'CLUSTER',
'COALSECE', 'COBOL', 'COLLATE', 'COLLATION', 'COLLATION_CATALOG',
'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLUMN', 'COLUMN_NAME',
'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT', 'COMMIT',
'COMMITTED', 'COMPLETION', 'CONDITION_NUMBER', 'CONNECT', 'CONNECTION',
'CONNECTION_NAME', 'CONSTRAINT', 'CONSTRAINTS', 'CONSTRAINT_CATALOG',
'CONSTRAINT_NAME', 'CONSTRAINT_SCHEMA', 'CONSTRUCTOR', 'CONTAINS',
'CONTINUE', 'CONVERSION', 'CONVERT', 'COPY', 'CORRESPONTING', 'COUNT',
'CREATE', 'CREATEDB', 'CREATEUSER', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_DATE',
'CURRENT_PATH', 'CURRENT_ROLE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP',
'CURRENT_USER', 'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE',
'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY',
'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED',
'DEFINED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DEREF', 'DESC',
'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR', 'DETERMINISTIC',
'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH', 'DISTINCT', 'DO',
'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE',
'EACH', 'ELSE', 'ENCODING', 'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY',
'EXCEPTION', 'EXCEPT', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING',
'EXISTS', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FOR',
'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE', 'FREEZE', 'FROM', 'FULL',
'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED',
'GROUP', 'GROUPING', 'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY',
'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT', 'IN',
'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR', 'INFIX', 'INHERITS', 'INITIALIZE',
'INITIALLY', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTANTIABLE',
'INSTEAD', 'INTERSECT', 'INTO', 'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'ITERATE', 'JOIN',
'KEY', 'KEY_MEMBER', 'KEY_TYPE', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST',
'LATERAL', 'LEADING', 'LEFT', 'LENGTH', 'LESS', 'LEVEL', 'LIKE', 'LIMIT', 'LISTEN', 'LOAD',
'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION', 'LOCATOR', 'LOCK', 'LOWER',
'MAP', 'MATCH', 'MAX', 'MAXVALUE', 'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH',
'MESSAGE_TEXT', 'METHOD', 'MIN', 'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES',
'MODIFY', 'MONTH', 'MORE', 'MOVE', 'MUMPS', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR',
'NCLOB', 'NEW', 'NEXT', 'NO', 'NOCREATEDB', 'NOCREATEUSER', 'NONE', 'NOT', 'NOTHING',
'NOTIFY', 'NOTNULL', 'NULL', 'NULLABLE', 'NULLIF', 'OBJECT', 'OCTET_LENGTH', 'OF', 'OFF',
'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY', 'OPEN', 'OPERATION', 'OPERATOR', 'OPTION', 'OPTIONS',
'OR', 'ORDER', 'ORDINALITY', 'OUT', 'OUTER', 'OUTPUT', 'OVERLAPS', 'OVERLAY', 'OVERRIDING',
'OWNER', 'PAD', 'PARAMETER', 'PARAMETERS', 'PARAMETER_MODE', 'PARAMATER_NAME',
'PARAMATER_ORDINAL_POSITION', 'PARAMETER_SPECIFIC_CATALOG',
'PARAMETER_SPECIFIC_NAME', 'PARAMATER_SPECIFIC_SCHEMA', 'PARTIAL',
'PASCAL', 'PENDANT', 'PLACING', 'PLI', 'POSITION', 'POSTFIX', 'PRECISION', 'PREFIX',
'PREORDER', 'PREPARE', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL',
'PROCEDURE', 'PUBLIC', 'READ', 'READS', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES',
'REFERENCING', 'REINDEX', 'RELATIVE', 'RENAME', 'REPEATABLE', 'REPLACE', 'RESET',
'RESTART', 'RESTRICT', 'RESULT', 'RETURN', 'RETURNED_LENGTH',
'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE', 'RETURNS', 'REVOKE', 'RIGHT',
'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINE_CATALOG', 'ROUTINE_NAME',
'ROUTINE_SCHEMA', 'ROW', 'ROWS', 'ROW_COUNT', 'RULE', 'SAVE_POINT', 'SCALE', 'SCHEMA',
'SCHEMA_NAME', 'SCOPE', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SELF',
'SENSITIVE', 'SERIALIZABLE', 'SERVER_NAME', 'SESSION', 'SESSION_USER', 'SET',
'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SIZE', 'SOME', 'SOURCE', 'SPACE',
'SPECIFIC', 'SPECIFICTYPE', 'SPECIFIC_NAME', 'SQL', 'SQLCODE', 'SQLERROR',
'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNINIG', 'STABLE', 'START', 'STATE', 'STATEMENT',
'STATIC', 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRUCTURE', 'STYPE',
'SUBCLASS_ORIGIN', 'SUBLIST', 'SUBSTRING', 'SUM', 'SYMMETRIC', 'SYSID', 'SYSTEM',
                'SYSTEM_USER', 'TABLE', 'TABLE_NAME', 'TEMP', 'TEMPLATE', 'TEMPORARY', 'TERMINATE',
'THAN', 'THEN', 'TIMESTAMP', 'TIMEZONE_HOUR', 'TIMEZONE_MINUTE', 'TO', 'TOAST',
'TRAILING', 'TRANSATION', 'TRANSACTIONS_COMMITTED',
'TRANSACTIONS_ROLLED_BACK', 'TRANSATION_ACTIVE', 'TRANSFORM',
'TRANSFORMS', 'TRANSLATE', 'TRANSLATION', 'TREAT', 'TRIGGER', 'TRIGGER_CATALOG',
'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE',
'UNCOMMITTED', 'UNDER', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN',
'UNNAMED', 'UNNEST', 'UNTIL', 'UPDATE', 'UPPER', 'USAGE', 'USER',
'USER_DEFINED_TYPE_CATALOG', 'USER_DEFINED_TYPE_NAME',
'USER_DEFINED_TYPE_SCHEMA', 'USING', 'VACUUM', 'VALID', 'VALIDATOR', 'VALUES',
'VARIABLE', 'VERBOSE', 'VERSION', 'VIEW', 'VOLATILE', 'WHEN', 'WHENEVER', 'WHERE',
'WITH', 'WITHOUT', 'WORK', 'WRITE', 'YEAR', 'ZONE'), suffix=r'\b'),
Keyword),
(words((
'ARRAY', 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'CHARACTER', 'DATE',
'DEC', 'DECIMAL', 'FLOAT', 'INT', 'INTEGER', 'INTERVAL', 'NUMBER', 'NUMERIC', 'REAL',
'SERIAL', 'SMALLINT', 'VARCHAR', 'VARYING', 'INT8', 'SERIAL8', 'TEXT'), suffix=r'\b'),
Name.Builtin),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'[0-9]+', Number.Integer),
# TODO: Backslash escapes?
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
(r'[a-z_][\w$]*', Name), # allow $s in strings for Oracle
(r'[;:()\[\],.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class MySqlLexer(RegexLexer):
"""
Special lexer for MySQL.
"""
name = 'MySQL'
aliases = ['mysql']
mimetypes = ['text/x-mysql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#|--\s+).*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'[0-9]+', Number.Integer),
(r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float),
(r"'(\\\\|\\'|''|[^'])*'", String.Single),
(r'"(\\\\|\\"|""|[^"])*"', String.Double),
(r"`(\\\\|\\`|``|[^`])*`", String.Symbol),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
r'precision|real|numeric|dec|decimal|timestamp|year|char|'
r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
bygroups(Keyword.Type, Text, Punctuation)),
(r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
r'character|check|collate|column|condition|constraint|continue|'
r'convert|create|cross|current_date|current_time|'
r'current_timestamp|current_user|cursor|database|databases|'
r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
r'declare|default|delayed|delete|desc|describe|deterministic|'
r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
r'enclosed|escaped|exists|exit|explain|fetch|flush|float|float4|'
r'float8|for|force|foreign|from|fulltext|grant|group|having|'
r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
r'minute_microsecond|minute_second|mod|modifies|natural|'
r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
r'replace|require|restrict|return|revoke|right|rlike|schema|'
r'schemas|second_microsecond|select|sensitive|separator|set|'
r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
r'varying|when|where|while|with|write|x509|xor|year_month|'
r'zerofill)\b', Keyword),
# TODO: this list is not complete
(r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
(r'(true|false|null)', Name.Constant),
(r'([a-z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-z_]\w*', Name),
(r'@[a-z0-9]*[._]*[a-z0-9]*', Name.Variable),
(r'[;:()\[\],.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class SqliteConsoleLexer(Lexer):
"""
Lexer for example sessions using sqlite3.
.. versionadded:: 0.11
"""
name = 'sqlite3con'
aliases = ['sqlite3']
filenames = ['*.sqlite3-console']
mimetypes = ['text/x-sqlite3-console']
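    # The sessions this lexer targets look roughly like the following
    # (hypothetical transcript):
    #
    #     sqlite> SELECT 1
    #        ...> ;
    #     1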
def get_tokens_unprocessed(self, data):
sql = SqlLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(data):
line = match.group()
            if line.startswith('sqlite> ') or line.startswith('   ...> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:8])]))
curcode += line[8:]
else:
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('SQL error: '):
yield (match.start(), Generic.Traceback, line)
else:
yield (match.start(), Generic.Output, line)
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
class RqlLexer(RegexLexer):
"""
Lexer for Relation Query Language.
`RQL <http://www.logilab.org/project/rql>`_
.. versionadded:: 2.0
"""
name = 'RQL'
aliases = ['rql']
filenames = ['*.rql']
mimetypes = ['text/x-rql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(DELETE|SET|INSERT|UNION|DISTINCT|WITH|WHERE|BEING|OR'
r'|AND|NOT|GROUPBY|HAVING|ORDERBY|ASC|DESC|LIMIT|OFFSET'
r'|TODAY|NOW|TRUE|FALSE|NULL|EXISTS)\b', Keyword),
(r'[+*/<>=%-]', Operator),
(r'(Any|is|instance_of|CWEType|CWRelation)\b', Name.Builtin),
(r'[0-9]+', Number.Integer),
(r'[A-Z_]\w*\??', Name),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Single),
(r'[;:()\[\],.]', Punctuation)
],
}
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from oslo.messaging.openstack.common.gettextutils import _
"""
import copy
import gettext
import logging.handlers
import os
import re
import UserString
import six
_localedir = os.environ.get('oslo.messaging'.upper() + '_LOCALEDIR')
_t = gettext.translation('oslo.messaging', localedir=_localedir, fallback=True)
def _(msg):
return _t.ugettext(msg)
def install(domain):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
"""
gettext.install(domain,
localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
unicode=True)
"""
Lazy gettext functionality.
The following is an attempt to introduce a deferred way
to do translations on messages in OpenStack. We attempt to
override the standard _() function and % (format string) operation
to build Message objects that can later be translated when we have
more information. Also included is an example LogHandler that
translates Messages to an associated locale, effectively allowing
many logs, each with their own locale.
"""
def get_lazy_gettext(domain):
"""Assemble and return a lazy gettext function for a given domain.
Factory method for a project/module to get a lazy gettext function
for its own translation domain (i.e. nova, glance, cinder, etc.)
"""
def _lazy_gettext(msg):
"""Create and return a Message object.
Message encapsulates a string so that we can translate it later when
needed.
"""
return Message(msg, domain)
return _lazy_gettext
class Message(UserString.UserString, object):
"""Class used to encapsulate translatable messages."""
def __init__(self, msg, domain):
# _msg is the gettext msgid and should never change
self._msg = msg
self._left_extra_msg = ''
self._right_extra_msg = ''
self.params = None
self.locale = None
self.domain = domain
@property
def data(self):
# NOTE(mrodden): this should always resolve to a unicode string
# that best represents the state of the message currently
localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
if self.locale:
lang = gettext.translation(self.domain,
localedir=localedir,
languages=[self.locale],
fallback=True)
else:
# use system locale for translations
lang = gettext.translation(self.domain,
localedir=localedir,
fallback=True)
full_msg = (self._left_extra_msg +
lang.ugettext(self._msg) +
self._right_extra_msg)
if self.params is not None:
full_msg = full_msg % self.params
return six.text_type(full_msg)
def _save_dictionary_parameter(self, dict_param):
full_msg = self.data
# look for %(blah) fields in string;
# ignore %% and deal with the
# case where % is first character on the line
keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
# if we don't find any %(blah) blocks but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
# apparently the full dictionary is the parameter
params = copy.deepcopy(dict_param)
else:
params = {}
for key in keys:
try:
params[key] = copy.deepcopy(dict_param[key])
except TypeError:
# cast uncopyable thing to unicode string
params[key] = unicode(dict_param[key])
return params
def _save_parameters(self, other):
# we check for None later to see if
# we actually have parameters to inject,
# so encapsulate if our parameter is actually None
if other is None:
self.params = (other, )
elif isinstance(other, dict):
self.params = self._save_dictionary_parameter(other)
else:
# fallback to casting to unicode,
# this will handle the problematic python code-like
# objects that cannot be deep-copied
try:
self.params = copy.deepcopy(other)
except TypeError:
self.params = unicode(other)
return self
# overrides to be more string-like
def __unicode__(self):
return self.data
def __str__(self):
return self.data.encode('utf-8')
def __getstate__(self):
to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
'domain', 'params', 'locale']
new_dict = self.__dict__.fromkeys(to_copy)
for attr in to_copy:
new_dict[attr] = copy.deepcopy(self.__dict__[attr])
return new_dict
def __setstate__(self, state):
for (k, v) in state.items():
setattr(self, k, v)
# operator overloads
def __add__(self, other):
copied = copy.deepcopy(self)
copied._right_extra_msg += other.__str__()
return copied
def __radd__(self, other):
copied = copy.deepcopy(self)
copied._left_extra_msg += other.__str__()
return copied
def __mod__(self, other):
# do a format string to catch and raise
# any possible KeyErrors from missing parameters
self.data % other
copied = copy.deepcopy(self)
return copied._save_parameters(other)
def __mul__(self, other):
return self.data * other
def __rmul__(self, other):
return other * self.data
def __getitem__(self, key):
return self.data[key]
def __getslice__(self, start, end):
return self.data.__getslice__(start, end)
def __getattribute__(self, name):
# NOTE(mrodden): handle lossy operations that we can't deal with yet
# These override the UserString implementation, since UserString
# uses our __class__ attribute to try and build a new message
# after running the inner data string through the operation.
# At that point, we have lost the gettext message id and can just
# safely resolve to a string instead.
ops = ['capitalize', 'center', 'decode', 'encode',
'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
if name in ops:
return getattr(self.data, name)
else:
return UserString.UserString.__getattribute__(self, name)
class LocaleHandler(logging.Handler):
"""Handler that can have a locale associated to translate Messages.
A quick example of how to utilize the Message class above.
LocaleHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating the internal Message.
"""
def __init__(self, locale, target):
"""Initialize a LocaleHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
logging.Handler.__init__(self)
self.locale = locale
self.target = target
def emit(self, record):
if isinstance(record.msg, Message):
# set the locale and resolve to a string
record.msg.locale = self.locale
self.target.emit(record)
|
|
#!/usr/bin/env python3
from test import support
import marshal
import sys
import unittest
import os
class HelperMixin:
def helper(self, sample, *extra):
new = marshal.loads(marshal.dumps(sample, *extra))
self.assertEqual(sample, new)
try:
with open(support.TESTFN, "wb") as f:
marshal.dump(sample, f, *extra)
with open(support.TESTFN, "rb") as f:
new = marshal.load(f)
self.assertEqual(sample, new)
finally:
support.unlink(support.TESTFN)
class IntTestCase(unittest.TestCase, HelperMixin):
def test_ints(self):
# Test the full range of Python ints.
n = sys.maxsize
while n:
for expected in (-n, n):
self.helper(expected)
n = n >> 1
def test_int64(self):
# Simulate int marshaling on a 64-bit box. This is most interesting if
# we're running the test on a 32-bit box, of course.
def to_little_endian_string(value, nbytes):
b = bytearray()
for i in range(nbytes):
b.append(value & 0xff)
value >>= 8
return b
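        # e.g. b'I' + to_little_endian_string(258, 8) is the marshal encoding
        # of the int 258 ('I' is the type code for a 64-bit little-endian int)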
maxint64 = (1 << 63) - 1
minint64 = -maxint64-1
for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
while base:
s = b'I' + to_little_endian_string(base, 8)
got = marshal.loads(s)
self.assertEqual(base, got)
if base == -1: # a fixed-point for shifting right 1
base = 0
else:
base >>= 1
def test_bool(self):
for b in (True, False):
self.helper(b)
class FloatTestCase(unittest.TestCase, HelperMixin):
def test_floats(self):
# Test a few floats
small = 1e-25
n = sys.maxsize * 3.7e250
while n > small:
for expected in (-n, n):
self.helper(float(expected))
n /= 123.4567
f = 0.0
s = marshal.dumps(f, 2)
got = marshal.loads(s)
self.assertEqual(f, got)
# and with version <= 1 (floats marshalled differently then)
s = marshal.dumps(f, 1)
got = marshal.loads(s)
self.assertEqual(f, got)
n = sys.maxsize * 3.7e-250
while n < small:
for expected in (-n, n):
f = float(expected)
self.helper(f)
self.helper(f, 1)
n *= 123.4567
class StringTestCase(unittest.TestCase, HelperMixin):
def test_unicode(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(marshal.loads(marshal.dumps(s)))
def test_string(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(s)
def test_bytes(self):
for s in [b"", b"Andr\xe8 Previn", b"abc", b" "*10000]:
self.helper(s)
class ExceptionTestCase(unittest.TestCase):
def test_exceptions(self):
new = marshal.loads(marshal.dumps(StopIteration))
self.assertEqual(StopIteration, new)
class CodeTestCase(unittest.TestCase):
def test_code(self):
co = ExceptionTestCase.test_exceptions.__code__
new = marshal.loads(marshal.dumps(co))
self.assertEqual(co, new)
def test_many_codeobjects(self):
# Issue2957: bad recursion count on code objects
count = 5000 # more than MAX_MARSHAL_STACK_DEPTH
codes = (ExceptionTestCase.test_exceptions.__code__,) * count
marshal.loads(marshal.dumps(codes))
class ContainerTestCase(unittest.TestCase, HelperMixin):
d = {'astring': '[email protected]',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'alist': ['.zyx.41'],
'atuple': ('.zyx.41',)*10,
'aboolean': False,
'aunicode': "Andr\xe8 Previn"
}
def test_dict(self):
self.helper(self.d)
def test_list(self):
self.helper(list(self.d.items()))
def test_tuple(self):
self.helper(tuple(self.d.keys()))
def test_sets(self):
for constructor in (set, frozenset):
self.helper(constructor(self.d.keys()))
class BugsTestCase(unittest.TestCase):
def test_bug_5888452(self):
# Simple-minded check for SF 588452: Debug build crashes
marshal.dumps([128] * 1000)
def test_patch_873224(self):
self.assertRaises(Exception, marshal.loads, '0')
self.assertRaises(Exception, marshal.loads, 'f')
self.assertRaises(Exception, marshal.loads, marshal.dumps(2**65)[:-1])
def test_version_argument(self):
# Python 2.4.0 crashes for any call to marshal.dumps(x, y)
self.assertEqual(marshal.loads(marshal.dumps(5, 0)), 5)
self.assertEqual(marshal.loads(marshal.dumps(5, 1)), 5)
def test_fuzz(self):
# simple test that it's at least not *totally* trivial to
# crash from bad marshal data
for c in [chr(i) for i in range(256)]:
try:
marshal.loads(c)
except Exception:
pass
def test_loads_recursion(self):
s = b'c' + (b'X' * 4*4) + b'{' * 2**20
self.assertRaises(ValueError, marshal.loads, s)
@support.impl_detail('specific recursion check')
def test_recursion_limit(self):
# Create a deeply nested structure.
head = last = []
# The max stack depth should match the value in Python/marshal.c.
if os.name == 'nt' and hasattr(sys, 'gettotalrefcount'):
MAX_MARSHAL_STACK_DEPTH = 1500
else:
MAX_MARSHAL_STACK_DEPTH = 2000
for i in range(MAX_MARSHAL_STACK_DEPTH - 2):
last.append([0])
last = last[-1]
# Verify we don't blow out the stack with dumps/load.
data = marshal.dumps(head)
new_head = marshal.loads(data)
# Don't use == to compare objects, it can exceed the recursion limit.
self.assertEqual(len(new_head), len(head))
self.assertEqual(len(new_head[0]), len(head[0]))
self.assertEqual(len(new_head[-1]), len(head[-1]))
last.append([0])
self.assertRaises(ValueError, marshal.dumps, head)
def test_exact_type_match(self):
# Former bug:
# >>> class Int(int): pass
# >>> type(loads(dumps(Int())))
# <type 'int'>
for typ in (int, float, complex, tuple, list, dict, set, frozenset):
# Note: str subclasses are not tested because they get handled
# by marshal's routines for objects supporting the buffer API.
subtyp = type('subtyp', (typ,), {})
self.assertRaises(ValueError, marshal.dumps, subtyp())
# Issue #1792 introduced a change in how marshal increases the size of its
# internal buffer; this test ensures that the new code is exercised.
def test_large_marshal(self):
size = int(1e6)
testString = 'abc' * size
marshal.dumps(testString)
def test_invalid_longs(self):
# Issue #7019: marshal.loads shouldn't produce unnormalized PyLongs
invalid_string = b'l\x02\x00\x00\x00\x00\x00\x00\x00'
self.assertRaises(ValueError, marshal.loads, invalid_string)
def test_multiple_dumps_and_loads(self):
# Issue 12291: marshal.load() should be callable multiple times
# with interleaved data written by non-marshal code
# Adapted from a patch by Engelbert Gruber.
data = (1, 'abc', b'def', 1.0, (2, 'a', ['b', b'c']))
for interleaved in (b'', b'0123'):
ilen = len(interleaved)
positions = []
try:
with open(support.TESTFN, 'wb') as f:
for d in data:
marshal.dump(d, f)
if ilen:
f.write(interleaved)
positions.append(f.tell())
with open(support.TESTFN, 'rb') as f:
for i, d in enumerate(data):
self.assertEqual(d, marshal.load(f))
if ilen:
f.read(ilen)
self.assertEqual(positions[i], f.tell())
finally:
support.unlink(support.TESTFN)
def test_loads_reject_unicode_strings(self):
# Issue #14177: marshal.loads() should not accept unicode strings
unicode_string = 'T'
self.assertRaises(TypeError, marshal.loads, unicode_string)
def test_main():
support.run_unittest(IntTestCase,
FloatTestCase,
StringTestCase,
CodeTestCase,
ContainerTestCase,
ExceptionTestCase,
BugsTestCase)
if __name__ == "__main__":
test_main()
|
|
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import uuid
import mock
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.tests import base as tests_base
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSBridgeTestBase(base.BaseOVSLinuxTestCase):
# TODO(twilson) So far, only ovsdb-related tests are written. It would be
# good to also add the openflow-related functions
def setUp(self):
super(OVSBridgeTestBase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
def create_ovs_port(self, *interface_attrs):
# Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default
attrs = collections.OrderedDict(interface_attrs)
attrs.setdefault('type', 'internal')
port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
return (port_name, self.br.add_port(port_name, *attrs.items()))
def create_ovs_vif_port(self, iface_id=None, mac=None,
iface_field='iface-id'):
if iface_id is None:
iface_id = base.get_rand_name()
if mac is None:
mac = base.get_rand_name()
attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac})
port_name, ofport = self.create_ovs_port(attrs)
return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br)
class OVSBridgeTestCase(OVSBridgeTestBase):
def test_port_lifecycle(self):
(port_name, ofport) = self.create_ovs_port(('type', 'internal'))
# ofport should always be an integer string with value -1 or > 0.
self.assertTrue(int(ofport))
self.assertTrue(int(self.br.get_port_ofport(port_name)))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual(self.br.br_name,
self.br.get_bridge_for_iface(port_name))
self.br.delete_port(port_name)
self.assertFalse(self.br.port_exists(port_name))
def test_duplicate_port_may_exist_false(self):
port_name, ofport = self.create_ovs_port(('type', 'internal'))
cmd = self.br.ovsdb.add_port(self.br.br_name,
port_name, may_exist=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_delete_port_if_exists_false(self):
cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_replace_port(self):
port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
self.br.replace_port(port_name, ('type', 'internal'))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('internal',
self.br.db_get_val('Interface', port_name, 'type'))
self.br.replace_port(port_name, ('type', 'internal'),
('external_ids', {'test': 'test'}))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('test', self.br.db_get_val('Interface', port_name,
'external_ids')['test'])
def test_attribute_lifecycle(self):
(port_name, ofport) = self.create_ovs_port()
tag = 42
self.ovs.set_db_attribute('Port', port_name, 'tag', tag)
self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag'))
self.assertEqual(tag, self.br.get_port_tag_dict()[port_name])
self.ovs.clear_db_attribute('Port', port_name, 'tag')
self.assertEqual([], self.ovs.db_get_val('Port', port_name, 'tag'))
self.assertEqual([], self.br.get_port_tag_dict()[port_name])
def test_get_bridge_external_bridge_id(self):
self.ovs.set_db_attribute('Bridge', self.br.br_name,
'external_ids',
{'bridge-id': self.br.br_name})
self.assertEqual(
self.br.br_name,
self.ovs.get_bridge_external_bridge_id(self.br.br_name))
def test_controller_lifecycle(self):
controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'}
self.br.set_controller(controllers)
self.assertSetEqual(controllers, set(self.br.get_controller()))
self.br.del_controller()
self.assertEqual([], self.br.get_controller())
def test_non_index_queries(self):
controllers = ['tcp:127.0.0.1:6633']
self.br.set_controller(controllers)
cmd = self.br.ovsdb.db_set('Controller', self.br.br_name,
('connection_mode', 'out-of-band'))
cmd.execute(check_error=True)
self.assertEqual('out-of-band',
self.br.db_get_val('Controller', self.br.br_name,
'connection_mode'))
def test_set_fail_mode_secure(self):
self.br.set_secure_mode()
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
def test_set_fail_mode_standalone(self):
self.br.set_standalone_mode()
self._assert_br_fail_mode(ovs_lib.FAILMODE_STANDALONE)
def _assert_br_fail_mode(self, fail_mode):
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'),
fail_mode)
def test_set_protocols(self):
self.br.set_protocols('OpenFlow10')
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'protocols'),
"OpenFlow10")
def test_get_datapath_id(self):
brdev = ip_lib.IPDevice(self.br.br_name)
dpid = brdev.link.attributes['link/ether'].replace(':', '')
self.br.set_db_attribute('Bridge',
self.br.br_name, 'datapath_id', dpid)
self.assertIn(dpid, self.br.get_datapath_id())
def _test_add_tunnel_port(self, attrs):
port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
self.br.add_tunnel_port(port_name, attrs['remote_ip'],
attrs['local_ip'])
self.assertEqual('gre',
self.ovs.db_get_val('Interface', port_name, 'type'))
options = self.ovs.db_get_val('Interface', port_name, 'options')
for attr, val in attrs.items():
self.assertEqual(val, options[attr])
def test_add_tunnel_port_ipv4(self):
attrs = {
'remote_ip': '192.0.2.1', # RFC 5737 TEST-NET-1
'local_ip': '198.51.100.1', # RFC 5737 TEST-NET-2
}
self._test_add_tunnel_port(attrs)
def test_add_tunnel_port_ipv6(self):
attrs = {
'remote_ip': '2001:db8:200::1',
'local_ip': '2001:db8:100::1',
}
self._test_add_tunnel_port(attrs)
def test_add_patch_port(self):
local = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
peer = 'remotepeer'
self.br.add_patch_port(local, peer)
self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'),
'patch')
options = self.ovs.db_get_val('Interface', local, 'options')
self.assertEqual(peer, options['peer'])
def test_get_port_name_list(self):
# Note that ovs-vsctl's list-ports does not include the port created
# with the same name as the bridge
ports = {self.create_ovs_port()[0] for i in range(5)}
self.assertSetEqual(ports, set(self.br.get_port_name_list()))
def test_get_iface_name_list(self):
ifaces = {self.create_ovs_port()[0] for i in range(5)}
self.assertSetEqual(ifaces, set(self.br.get_iface_name_list()))
def test_get_port_stats(self):
# Nothing seems to use this function?
(port_name, ofport) = self.create_ovs_port()
stats = set(self.br.get_port_stats(port_name).keys())
self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats))
def test_get_vif_ports(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
ports = self.br.get_vif_ports()
self.assertEqual(3, len(ports))
self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
self.assertEqual(sorted([x.port_name for x in vif_ports]),
sorted([x.port_name for x in ports]))
def test_get_vif_ports_with_bond(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
# bond ports don't have records in the Interface table but they do in
# the Port table
orig = self.br.get_port_name_list
new_port_name_list = lambda: orig() + ['bondport']
mock.patch.object(self.br, 'get_port_name_list',
new=new_port_name_list).start()
ports = self.br.get_vif_ports()
self.assertEqual(3, len(ports))
self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
self.assertEqual(sorted([x.port_name for x in vif_ports]),
sorted([x.port_name for x in ports]))
def test_get_vif_port_set(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(2)]
ports = self.br.get_vif_port_set()
expected = set([x.vif_id for x in vif_ports])
self.assertEqual(expected, ports)
def test_get_vif_port_set_with_missing_port(self):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port()]
# return an extra port to make sure the db list ignores it
orig = self.br.get_port_name_list
new_port_name_list = lambda: orig() + ['anotherport']
mock.patch.object(self.br, 'get_port_name_list',
new=new_port_name_list).start()
ports = self.br.get_vif_port_set()
expected = set([vif_ports[0].vif_id])
self.assertEqual(expected, ports)
def test_get_vif_port_set_on_empty_bridge_returns_empty_set(self):
# Create a port on self.br
self.create_ovs_vif_port()
# Create another, empty bridge
br_2 = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
# Assert that get_vif_port_set on an empty bridge returns an empty set,
# and does not return the other bridge's ports.
self.assertEqual(set(), br_2.get_vif_port_set())
def test_get_ports_attributes(self):
port_names = [self.create_ovs_port()[0], self.create_ovs_port()[0]]
db_ports = self.br.get_ports_attributes('Interface', columns=['name'])
db_ports_names = [p['name'] for p in db_ports]
self.assertEqual(sorted(port_names), sorted(db_ports_names))
def test_get_port_tag_dict(self):
# Simple case tested in port test_set_get_clear_db_val
pass
def test_get_vif_port_by_id(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
for vif in vif_ports:
self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id,
vif.vif_id)
def test_get_vifs_by_ids(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
by_id = self.br.get_vifs_by_ids([v.vif_id for v in vif_ports])
# convert to str for comparison of VifPorts
by_id = {vid: str(vport) for vid, vport in by_id.items()}
self.assertEqual({v.vif_id: str(v) for v in vif_ports}, by_id)
def test_delete_ports(self):
        # TODO(twilson) I intensely dislike the current delete_ports function:
        # its default behavior is really delete_vif_ports(), and it only acts
        # the way a delete_ports() seems like it should when all_ports=True
        # is passed
# Create 2 non-vif ports and 2 vif ports
nonvifs = {self.create_ovs_port()[0] for i in range(2)}
vifs = {self.create_ovs_vif_port().port_name for i in range(2)}
self.assertSetEqual(nonvifs.union(vifs),
set(self.br.get_port_name_list()))
self.br.delete_ports()
self.assertSetEqual(nonvifs, set(self.br.get_port_name_list()))
self.br.delete_ports(all_ports=True)
self.assertEqual(len(self.br.get_port_name_list()), 0)
def test_set_controller_connection_mode(self):
controllers = ['tcp:192.0.2.0:6633']
self._set_controllers_connection_mode(controllers)
def test_set_multi_controllers_connection_mode(self):
controllers = ['tcp:192.0.2.0:6633', 'tcp:192.0.2.1:55']
self._set_controllers_connection_mode(controllers)
def _set_controllers_connection_mode(self, controllers):
self.br.set_controller(controllers)
self.assertEqual(sorted(controllers), sorted(self.br.get_controller()))
self.br.set_controllers_connection_mode('out-of-band')
self._assert_controllers_connection_mode('out-of-band')
self.br.del_controller()
self.assertEqual([], self.br.get_controller())
def _assert_controllers_connection_mode(self, connection_mode):
controllers = self.br.db_get_val('Bridge', self.br.br_name,
'controller')
controllers = [controllers] if isinstance(
controllers, uuid.UUID) else controllers
for controller in controllers:
self.assertEqual(connection_mode,
self.br.db_get_val('Controller',
controller,
'connection_mode'))
def test_egress_bw_limit(self):
port_name, _ = self.create_ovs_port()
self.br.create_egress_bw_limit_for_port(port_name, 700, 70)
max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
self.assertEqual(700, max_rate)
self.assertEqual(70, burst)
self.br.delete_egress_bw_limit_for_port(port_name)
max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
self.assertIsNone(max_rate)
self.assertIsNone(burst)
class OVSLibTestCase(base.BaseOVSLinuxTestCase):
def setUp(self):
super(OVSLibTestCase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
def test_bridge_lifecycle_baseovs(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
self.addCleanup(self.ovs.delete_bridge, name)
br = self.ovs.add_bridge(name)
self.assertEqual(br.br_name, name)
self.assertTrue(self.ovs.bridge_exists(name))
self.ovs.delete_bridge(name)
self.assertFalse(self.ovs.bridge_exists(name))
def test_get_bridges(self):
bridges = {
self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name
for i in range(5)}
self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges))
def test_bridge_lifecycle_ovsbridge(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
br = ovs_lib.OVSBridge(name)
self.assertEqual(br.br_name, name)
        # Make sure that instantiating an OVSBridge does not actually create the bridge
self.assertFalse(self.ovs.bridge_exists(name))
self.addCleanup(self.ovs.delete_bridge, name)
br.create()
self.assertTrue(self.ovs.bridge_exists(name))
br.destroy()
self.assertFalse(self.ovs.bridge_exists(name))
def test_db_find_column_type_list(self):
"""Fixate output for vsctl/native ovsdb_interface.
Makes sure that db_find search queries give the same result for both
implementations.
"""
bridge_name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
self.addCleanup(self.ovs.delete_bridge, bridge_name)
br = self.ovs.add_bridge(bridge_name)
port_name = base.get_rand_name(prefix=net_helpers.PORT_PREFIX)
br.add_port(port_name)
self.ovs.set_db_attribute('Port', port_name, 'tag', 42)
tags = self.ovs.ovsdb.db_list('Port', columns=['tag']).execute()
# Make sure that there is data to query.
# It should be, but let's be a little paranoid here as otherwise
        # the test would make no sense
tags_present = [t for t in tags if t['tag'] != []]
self.assertTrue(tags_present)
tags_42 = [t for t in tags_present if t['tag'] == 42]
single_value = self.ovs.ovsdb.db_find(
'Port', ('tag', '=', 42), columns=['tag']).execute()
self.assertEqual(tags_42, single_value)
len_0_list = self.ovs.ovsdb.db_find(
'Port', ('tag', '!=', []), columns=['tag']).execute()
self.assertEqual(tags_present, len_0_list)
|
|
"""
Eric Nordstrom
Python 3.6.0
4/29/17
Removes out-of-vocabulary (OOV) words, a.k.a. "mixed words", from the provided series of
tokens. Words are deemed OOV when they are not found in either provided language dictionary.
Results are stored in .TXT file(s) specified by the user. PyDictionary option available for
English dictionary (requires PyDictionary module and reliable internet connection).
Example command line input:
C:\Users\Me\Research\Files>..\Scripts\OOVs.py "Tokenized Corpus.txt" SpnDict . -d1 utf8
Interpretation:
..\Scripts\OOVs.py Call OOVs.py from separate directory
"Tokenized Corpus.txt" Corpus tokens data (quotes to avoid parsing argument)
SpnDict Spanish dictionary (".txt" assumed)
. PyDictionary option chosen for English dictionary
-d1 Spanish dictionary encoding type argument called
utf8 Spanish dictionary encoding type specification
"""
def PyDict(): #for default D2 argument in OOV_remove
'''Returns PyDictionary object'''
from PyDictionary import PyDictionary
return PyDictionary()
def OOV_remove( tokens, D1, D2=None ):
    '''Removes OOVs from tokens list based on two dictionaries. PyDictionary module used for Dictionary 2 by default.'''
    import string
    if D2 is None: #default evaluated lazily so that supplying D2 never requires the PyDictionary module
        D2 = PyDict()
    if type( D2 ) in { set, list, tuple, dict }:
def condition3( word, D2 ): #condition for IF statement in FOR loop
return word not in D2
else: #assume PyDictionary
def condition3( word, D2 ):
return D2.meaning( word ) == None #This line would print to the console on each OOV if the STDOUT were not changed.
import sys, os
orig_stdout = sys.stdout #to save for later
sys.stdout = open( os.devnull, 'w' ) #prevents printing to console during PyDictionary usage
t = list( tokens ) #to become output tokens LIST with OOVs removed
OOVs = {} #to become DICT containing removed OOVs hashed with their original indices in TOKENS
d = 0 #index offset to account for already removed OOV words
for i in range( 0, len(tokens) ):
word = tokens[i]
if word not in string.punctuation and word not in D1 and condition3( word, D2 ):
OOVs.update({ i+1 : word }) #can remove "+1" after "i" on this line if zero-indexing desired.
del t[i-d]
d += 1
if type( D2 ) not in { set, list, tuple, dict }:
sys.stdout = orig_stdout #restore stdout
return ( t, OOVs )
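# Worked example with made-up data (not from the original script):
#     OOV_remove( ['hola', 'zzz', 'hello', '!'], {'hola'}, {'hello'} )
# returns ( ['hola', 'hello', '!'], {2: 'zzz'} ) -- note the 1-based index.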
def gettxt( file_name, encoding_type=None ):
'''Reads and splits .TXT files. Appends ".txt" to file name if necessary.'''
name = file_name
if name[-4:] != ".txt":
name += ".txt"
return open( name, encoding=encoding_type ).read().split() #LIST type
def get_answer(prompt, accepted_answers, answer_type = str):
'''Loops until input is an accepted answer'''
answer = 'a;sdlkfha;oiwefhdnfaf;we'
    while answer.lower() not in accepted_answers: #case-insensitive, matching the check below
answer = answer_type( input( prompt ) )
if answer.lower() not in accepted_answers:
print( '"%s" is not an accepted response.' % str( answer ) )
return answer
def destwrite( words, help_message ):
'''User interface for writing to .TXT files. Does not return anything.'''
destname = input( '\nInput destination .TXT file name ("\\H" for help): ' )
h = True
if destname.lower() == "\\h":
print( help_message )
destname = input( "\nInput destination .TXT file name: " )
h = False
option = 'n'
sep = False #used for "append" case
while option in { 'c', 'n' }: #determine how to open file
if destname[-4:] != ".txt":
destname += ".txt"
try: #User should preferably type a file name that does not already exist, in which case this block is not necessary.
dest = open( destname, 'r' )
print( "\nFile by that name already exists." )
prompt = 'Options:\n\t"O" - overwrite contents\n\t"A" - append to contents\n\t"C" - create new file with "(1)" appended to name\n\t"N" - enter new name\n\t[ctrl]+[C] - exit\n\nInput: '
accepted_answers = { 'o', 'a', 'c', 'n', '\h' }
option = get_answer( prompt, accepted_answers ).lower()
if option == 'o':
print( '\nOverwriting "%s".' % destname )
dest = open( destname, 'w' )
elif option == 'a':
print( '\nAppending to "%s".' % destname )
dest = open( destname, 'a' )
sep = True
elif option == 'c':
destname = destname[:-4] + " (1)"
elif option == 'n':
destname = input( "\nInput destination .TXT file name%s: " % ( ' ("\\H" for help)' * h ) )
else:
print( help_message )
destname = input( "\nInput destination .TXT file name: " )
h = False
except FileNotFoundError: #Preferred block
option = '' #to exit WHILE loop
print( '\nCreating and writing to new file "%s".' % destname )
dest = open( destname, 'w' )
dest.write( "\n"*9*sep ) #for "append" case
for i in words:
dest.write( str( i ) )
if type( words ) == dict: #OOVs
dest.write( " : " + words[i] )
dest.write( "\n" )
dest.close()
print( "Writing complete. File saved." )
def main():
import argparse
parser = argparse.ArgumentParser( description = 'Locate, remove, and record out-of-vocabulary (OOV) words, a.k.a. "mixed words"' )
parser.add_argument( "TOKENS", help="Name of the .TXT file containing corpus tokens." )
parser.add_argument( "D1", help="Name of the language 1 dictionary .TXT file" )
parser.add_argument( "D2", help='Name of the language 2 dictionary .TXT file. Enter "." for PyDictionary (requires PyDictionary module and reliable internet connection). NOTE: PyDictionary only for English; English dictionary must be D2 if using PyDictionary.' )
parser.add_argument( "-t", "--TOKENS_encoding", help="Tokens .TXT file encoding type. Default used if not specified." )
parser.add_argument( "-d1", "--D1_encoding", help="Language 1 dictionary .TXT file encoding type. Default used if not specified." )
parser.add_argument( "-d2", "--D2_encoding", help="Language 2 dictionary .TXT file encoding type. Default used if not specified." )
parser.add_argument( "-cd", "--change_directory", help='Change the folder in which to locate .TXT files. NOTE: It is also possible to specify individual file locations by including the entire path starting from "C:\".' )
args = parser.parse_args()
if args.change_directory:
import os
os.chdir( args.change_directory )
tokens = gettxt( args.TOKENS, args.TOKENS_encoding )
D1 = gettxt( args.D1, args.D1_encoding )
if args.D2 == ".":
if args.D2_encoding:
raise RuntimeError( "Both PyDictionary option and encoding type specified for D2." )
D2 = PyDict()
else:
D2 = gettxt( args.D2, args.D2_encoding )
print( "\nRemoving OOVs...\n" )
( tokens_without_OOVs, OOVs ) = OOV_remove( tokens, D1, D2 )
print( "\nOOVs removed.\n" )
help_message = '\nDestination .TXT file used to store tokens list after removing out-of-vocabulary (OOV) words, a.k.a. "mixed words". If destination file to be outside of current working directory, include file location path in name.'
destwrite( tokens_without_OOVs, help_message )
prompt = "\nWrite removed OOVs to .TXT file? (Y/N): "
accepted_answers = { 'y', 'n' }
keep_OOVs = get_answer( prompt, accepted_answers )
if keep_OOVs.lower() == 'y':
help_message = '\nDestination .TXT file used to store removed out-of-vocabulary (OOV) words, a.k.a. "mixed words", and their corresponding locations in the original tokens list. If destination file to be outside of current working directory, include file location path in name.'
destwrite( OOVs, help_message )
print( "\nDone." )
if __name__ == "__main__":
main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code snippets used in webdocs.
The examples here are written specifically to read well with the accompanying
web docs. Do not rewrite them until you make sure the webdocs still read well
and the rewritten code supports the concept being described. For example, there
are snippets that could be shorter but they are written like this to make a
specific point in the docs.
The code snippets are all organized as self-contained functions. Parts of the
function body delimited by [START tag] and [END tag] will be included
automatically in the web docs. The naming convention for the tags is to use the
PATH_TO_HTML where they are included as a prefix, followed by a descriptive
string. The tags can contain only letters, digits and _.
"""
import apache_beam as beam
from apache_beam.metrics import Metrics
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# Quiet some pylint warnings that happen because of the somewhat special
# format for the code snippets.
# pylint:disable=invalid-name
# pylint:disable=expression-not-assigned
# pylint:disable=redefined-outer-name
# pylint:disable=reimported
# pylint:disable=unused-variable
# pylint:disable=wrong-import-order, wrong-import-position
class SnippetUtils(object):
from apache_beam.pipeline import PipelineVisitor
class RenameFiles(PipelineVisitor):
"""RenameFiles will rewire read/write paths for unit testing.
    RenameFiles will replace the GCS files specified in the read and
    write transforms with local files so the pipeline can be run as a
    unit test. This assumes that the read and write transforms defined in
    snippets have already been replaced by the transforms 'DummyReadForTesting'
    and 'DummyWriteForTesting' (see snippets_test.py).
    This is as close as we can get to having code snippets that are
    executed and also ready to be presented in the webdocs.
"""
def __init__(self, renames):
self.renames = renames
def visit_transform(self, transform_node):
if transform_node.full_label.find('DummyReadForTesting') >= 0:
transform_node.transform.fn.file_to_read = self.renames['read']
elif transform_node.full_label.find('DummyWriteForTesting') >= 0:
transform_node.transform.fn.file_to_write = self.renames['write']
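# Illustrative usage of RenameFiles (the paths here are hypothetical; the unit
# tests build the renames dict):
#   p.visit(SnippetUtils.RenameFiles({'read': '/tmp/input.txt',
#                                     'write': '/tmp/output.txt'}))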
def construct_pipeline(renames):
"""A reverse words snippet as an example for constructing a pipeline."""
import re
# This is duplicate of the import statement in
# pipelines_constructing_creating tag below, but required to avoid
# Unresolved reference in ReverseWords class
import apache_beam as beam
class ReverseWords(beam.PTransform):
"""A PTransform that reverses individual elements in a PCollection."""
def expand(self, pcoll):
return pcoll | beam.Map(lambda e: e[::-1])
def filter_words(unused_x):
"""Pass through filter to select everything."""
return True
# [START pipelines_constructing_creating]
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
p = beam.Pipeline(options=PipelineOptions())
# [END pipelines_constructing_creating]
p = TestPipeline() # Use TestPipeline for testing.
# [START pipelines_constructing_reading]
lines = p | 'ReadMyFile' >> beam.io.ReadFromText('gs://some/inputData.txt')
# [END pipelines_constructing_reading]
# [START pipelines_constructing_applying]
words = lines | beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
reversed_words = words | ReverseWords()
# [END pipelines_constructing_applying]
# [START pipelines_constructing_writing]
filtered_words = reversed_words | 'FilterWords' >> beam.Filter(filter_words)
filtered_words | 'WriteMyFile' >> beam.io.WriteToText(
'gs://some/outputData.txt')
# [END pipelines_constructing_writing]
p.visit(SnippetUtils.RenameFiles(renames))
# [START pipelines_constructing_running]
p.run()
# [END pipelines_constructing_running]
def model_pipelines(argv):
"""A wordcount snippet as a simple pipeline example."""
# [START model_pipelines]
import re
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear'
'.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
pipeline_options = PipelineOptions(argv)
my_options = pipeline_options.view_as(MyOptions)
p = beam.Pipeline(options=pipeline_options)
(p
| beam.io.ReadFromText(my_options.input)
| beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| beam.Map(lambda x: (x, 1))
| beam.combiners.Count.PerKey()
| beam.io.WriteToText(my_options.output))
result = p.run()
# [END model_pipelines]
result.wait_until_finish()
def model_pcollection(argv):
"""Creating a PCollection from data in local memory."""
from apache_beam.options.pipeline_options import PipelineOptions
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
pipeline_options = PipelineOptions(argv)
my_options = pipeline_options.view_as(MyOptions)
# [START model_pcollection]
p = beam.Pipeline(options=pipeline_options)
lines = (p
| beam.Create([
'To be, or not to be: that is the question: ',
'Whether \'tis nobler in the mind to suffer ',
'The slings and arrows of outrageous fortune, ',
'Or to take arms against a sea of troubles, ']))
# [END model_pcollection]
(lines
| beam.io.WriteToText(my_options.output))
result = p.run()
result.wait_until_finish()
def pipeline_options_remote(argv):
"""Creating a Pipeline using a PipelineOptions object for remote execution."""
from apache_beam import Pipeline
from apache_beam.options.pipeline_options import PipelineOptions
# [START pipeline_options_create]
options = PipelineOptions(flags=argv)
# [END pipeline_options_create]
# [START pipeline_options_define_custom]
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input')
parser.add_argument('--output')
# [END pipeline_options_define_custom]
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
# [START pipeline_options_dataflow_service]
# Create and set your PipelineOptions.
options = PipelineOptions(flags=argv)
# For Cloud execution, set the Cloud Platform project, job_name,
# staging location, temp_location and specify DataflowRunner.
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = 'my-project-id'
google_cloud_options.job_name = 'myjob'
google_cloud_options.staging_location = 'gs://my-bucket/binaries'
google_cloud_options.temp_location = 'gs://my-bucket/temp'
options.view_as(StandardOptions).runner = 'DataflowRunner'
# Create the Pipeline with the specified options.
p = Pipeline(options=options)
# [END pipeline_options_dataflow_service]
my_options = options.view_as(MyOptions)
my_input = my_options.input
my_output = my_options.output
p = TestPipeline() # Use TestPipeline for testing.
lines = p | beam.io.ReadFromText(my_input)
lines | beam.io.WriteToText(my_output)
p.run()
def pipeline_options_local(argv):
"""Creating a Pipeline using a PipelineOptions object for local execution."""
from apache_beam import Pipeline
from apache_beam.options.pipeline_options import PipelineOptions
options = PipelineOptions(flags=argv)
# [START pipeline_options_define_custom_with_help_and_default]
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
parser.add_argument('--output',
help='Output for the pipeline',
default='gs://my-bucket/output')
# [END pipeline_options_define_custom_with_help_and_default]
my_options = options.view_as(MyOptions)
my_input = my_options.input
my_output = my_options.output
# [START pipeline_options_local]
# Create and set your Pipeline Options.
options = PipelineOptions()
p = Pipeline(options=options)
# [END pipeline_options_local]
p = TestPipeline() # Use TestPipeline for testing.
lines = p | beam.io.ReadFromText(my_input)
lines | beam.io.WriteToText(my_output)
p.run()
def pipeline_options_command_line(argv):
"""Creating a Pipeline by passing a list of arguments."""
# [START pipeline_options_command_line]
# Use Python argparse module to parse custom arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--output')
known_args, pipeline_args = parser.parse_known_args(argv)
# Create the Pipeline with remaining arguments.
p = beam.Pipeline(argv=pipeline_args)
lines = p | 'ReadFromText' >> beam.io.ReadFromText(known_args.input)
lines | 'WriteToText' >> beam.io.WriteToText(known_args.output)
# [END pipeline_options_command_line]
p.run().wait_until_finish()
def pipeline_logging(lines, output):
"""Logging Pipeline Messages."""
import re
import apache_beam as beam
# [START pipeline_logging]
# import Python logging module.
import logging
class ExtractWordsFn(beam.DoFn):
def process(self, element):
words = re.findall(r'[A-Za-z\']+', element)
for word in words:
yield word
if word.lower() == 'love':
# Log using the root logger at info or higher levels
logging.info('Found : %s', word.lower())
# Remaining WordCount example code ...
# [END pipeline_logging]
p = TestPipeline() # Use TestPipeline for testing.
(p
| beam.Create(lines)
| beam.ParDo(ExtractWordsFn())
| beam.io.WriteToText(output))
p.run()
def pipeline_monitoring(renames):
"""Using monitoring interface snippets."""
import re
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
class WordCountOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
parser.add_argument('--output',
help='output for the pipeline',
default='gs://my-bucket/output')
class ExtractWordsFn(beam.DoFn):
def process(self, element):
words = re.findall(r'[A-Za-z\']+', element)
for word in words:
yield word
class FormatCountsFn(beam.DoFn):
def process(self, element):
word, count = element
yield '%s: %s' % (word, count)
# [START pipeline_monitoring_composite]
# The CountWords Composite Transform inside the WordCount pipeline.
class CountWords(beam.PTransform):
def expand(self, pcoll):
return (pcoll
# Convert lines of text into individual words.
| 'ExtractWords' >> beam.ParDo(ExtractWordsFn())
# Count the number of times each word occurs.
| beam.combiners.Count.PerElement()
# Format each word and count into a printable string.
| 'FormatCounts' >> beam.ParDo(FormatCountsFn()))
# [END pipeline_monitoring_composite]
pipeline_options = PipelineOptions()
options = pipeline_options.view_as(WordCountOptions)
p = TestPipeline() # Use TestPipeline for testing.
# [START pipeline_monitoring_execution]
(p
# Read the lines of the input text.
| 'ReadLines' >> beam.io.ReadFromText(options.input)
# Count the words.
| CountWords()
# Write the formatted word counts to output.
| 'WriteCounts' >> beam.io.WriteToText(options.output))
# [END pipeline_monitoring_execution]
p.visit(SnippetUtils.RenameFiles(renames))
p.run()
def examples_wordcount_minimal(renames):
"""MinimalWordCount example snippets."""
import re
import apache_beam as beam
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import PipelineOptions
# [START examples_wordcount_minimal_options]
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = 'my-project-id'
google_cloud_options.job_name = 'myjob'
google_cloud_options.staging_location = 'gs://your-bucket-name-here/staging'
google_cloud_options.temp_location = 'gs://your-bucket-name-here/temp'
options.view_as(StandardOptions).runner = 'DataflowRunner'
# [END examples_wordcount_minimal_options]
# Run it locally for testing.
options = PipelineOptions()
# [START examples_wordcount_minimal_create]
p = beam.Pipeline(options=options)
# [END examples_wordcount_minimal_create]
(
# [START examples_wordcount_minimal_read]
p | beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
# [END examples_wordcount_minimal_read]
# [START examples_wordcount_minimal_pardo]
| 'ExtractWords' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
# [END examples_wordcount_minimal_pardo]
# [START examples_wordcount_minimal_count]
| beam.combiners.Count.PerElement()
# [END examples_wordcount_minimal_count]
# [START examples_wordcount_minimal_map]
| beam.Map(lambda (word, count): '%s: %s' % (word, count))
# [END examples_wordcount_minimal_map]
# [START examples_wordcount_minimal_write]
| beam.io.WriteToText('gs://my-bucket/counts.txt')
# [END examples_wordcount_minimal_write]
)
p.visit(SnippetUtils.RenameFiles(renames))
# [START examples_wordcount_minimal_run]
result = p.run()
# [END examples_wordcount_minimal_run]
result.wait_until_finish()
def examples_wordcount_wordcount(renames):
"""WordCount example snippets."""
import re
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
argv = []
# [START examples_wordcount_wordcount_options]
class WordCountOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
options = PipelineOptions(argv)
p = beam.Pipeline(options=options)
# [END examples_wordcount_wordcount_options]
lines = p | beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
# [START examples_wordcount_wordcount_composite]
class CountWords(beam.PTransform):
def expand(self, pcoll):
return (pcoll
# Convert lines of text into individual words.
| 'ExtractWords' >> beam.FlatMap(
lambda x: re.findall(r'[A-Za-z\']+', x))
# Count the number of times each word occurs.
| beam.combiners.Count.PerElement())
counts = lines | CountWords()
# [END examples_wordcount_wordcount_composite]
# [START examples_wordcount_wordcount_dofn]
class FormatAsTextFn(beam.DoFn):
def process(self, element):
word, count = element
yield '%s: %s' % (word, count)
formatted = counts | beam.ParDo(FormatAsTextFn())
# [END examples_wordcount_wordcount_dofn]
formatted | beam.io.WriteToText('gs://my-bucket/counts.txt')
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def examples_wordcount_debugging(renames):
"""DebuggingWordCount example snippets."""
import re
import apache_beam as beam
# [START example_wordcount_debugging_logging]
# [START example_wordcount_debugging_aggregators]
import logging
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regular expression."""
def __init__(self, pattern):
self.pattern = pattern
# A custom metric can track values in your pipeline as it runs. Create
      # custom metrics matched_words and unmatched_words.
      self.matched_words = Metrics.counter(self.__class__, 'matched_words')
      self.unmatched_words = Metrics.counter(self.__class__, 'unmatched_words')
def process(self, element):
word, _ = element
if re.match(self.pattern, word):
# Log at INFO level each element we match. When executing this pipeline
# using the Dataflow service, these log lines will appear in the Cloud
# Logging UI.
logging.info('Matched %s', word)
# Add 1 to the custom metric counter matched_words
self.matched_words.inc()
yield element
else:
# Log at the "DEBUG" level each element that is not matched. Different
# log levels can be used to control the verbosity of logging providing
# an effective mechanism to filter less important information. Note
# currently only "INFO" and higher level logs are emitted to the Cloud
# Logger. This log message will not be visible in the Cloud Logger.
logging.debug('Did not match %s', word)
        # Add 1 to the custom metric counter unmatched_words
        self.unmatched_words.inc()
# [END example_wordcount_debugging_logging]
# [END example_wordcount_debugging_aggregators]
p = TestPipeline() # Use TestPipeline for testing.
filtered_words = (
p
| beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
| 'ExtractWords' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| beam.combiners.Count.PerElement()
| 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach')))
# [START example_wordcount_debugging_assert]
beam.testing.util.assert_that(
filtered_words, beam.testing.util.equal_to(
[('Flourish', 3), ('stomach', 1)]))
# [END example_wordcount_debugging_assert]
output = (filtered_words
| 'format' >> beam.Map(lambda (word, c): '%s: %s' % (word, c))
| 'Write' >> beam.io.WriteToText('gs://my-bucket/counts.txt'))
p.visit(SnippetUtils.RenameFiles(renames))
p.run()
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.io.range_trackers import OffsetRangeTracker
from apache_beam.transforms.core import PTransform
from apache_beam.options.pipeline_options import PipelineOptions
# Defining a new source.
# [START model_custom_source_new_source]
class CountingSource(iobase.BoundedSource):
def __init__(self, count):
self.records_read = Metrics.counter(self.__class__, 'recordsRead')
self._count = count
def estimate_size(self):
return self._count
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._count
return OffsetRangeTracker(start_position, stop_position)
def read(self, range_tracker):
for i in range(self._count):
if not range_tracker.try_claim(i):
return
self.records_read.inc()
yield i
def split(self, desired_bundle_size, start_position=None,
stop_position=None):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._count
bundle_start = start_position
while bundle_start < self._count:
      bundle_stop = min(self._count, bundle_start + desired_bundle_size)
yield iobase.SourceBundle(weight=(bundle_stop - bundle_start),
source=self,
start_position=bundle_start,
stop_position=bundle_stop)
bundle_start = bundle_stop
# [END model_custom_source_new_source]
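# Worked example (illustrative): with count=10 and desired_bundle_size=4,
# split() above yields bundles covering the ranges [0, 4), [4, 8) and [8, 10).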
def model_custom_source(count):
"""Demonstrates creating a new custom source and using it in a pipeline.
Defines a new source ``CountingSource`` that produces integers starting from 0
up to a given size.
Uses the new source in an example pipeline.
Additionally demonstrates how a source should be implemented using a
``PTransform``. This is the recommended way to develop sources that are to
  be distributed to a large number of end users.
This method runs two pipelines.
  (1) A pipeline that uses ``CountingSource`` directly using the ``beam.io.Read``
transform.
(2) A pipeline that uses a custom ``PTransform`` that wraps
``CountingSource``.
Args:
count: the size of the counting source to be used in the pipeline
demonstrated in this method.
"""
# Using the source in an example pipeline.
# [START model_custom_source_use_new_source]
p = beam.Pipeline(options=PipelineOptions())
numbers = p | 'ProduceNumbers' >> beam.io.Read(CountingSource(count))
# [END model_custom_source_use_new_source]
lines = numbers | beam.core.Map(lambda number: 'line %d' % number)
assert_that(
lines, equal_to(
['line ' + str(number) for number in range(0, count)]))
p.run().wait_until_finish()
# We recommend users to start Source classes with an underscore to discourage
# using the Source class directly when a PTransform for the source is
# available. We simulate that here by simply extending the previous Source
# class.
class _CountingSource(CountingSource):
pass
# [START model_custom_source_new_ptransform]
class ReadFromCountingSource(PTransform):
def __init__(self, count, **kwargs):
super(ReadFromCountingSource, self).__init__(**kwargs)
self._count = count
def expand(self, pcoll):
      return pcoll | iobase.Read(_CountingSource(self._count))
# [END model_custom_source_new_ptransform]
# [START model_custom_source_use_ptransform]
p = beam.Pipeline(options=PipelineOptions())
numbers = p | 'ProduceNumbers' >> ReadFromCountingSource(count)
# [END model_custom_source_use_ptransform]
lines = numbers | beam.core.Map(lambda number: 'line %d' % number)
assert_that(
lines, equal_to(
['line ' + str(number) for number in range(0, count)]))
# Don't test runner api due to pickling errors.
p.run(test_runner_api=False).wait_until_finish()
def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform,
final_table_name_with_ptransform):
"""Demonstrates creating a new custom sink and using it in a pipeline.
Defines a new sink ``SimpleKVSink`` that demonstrates writing to a simple
  key-value based storage system which has the following API.
simplekv.connect(url) -
connects to the storage system and returns an access token which can be
used to perform further operations
simplekv.open_table(access_token, table_name) -
creates a table named 'table_name'. Returns a table object.
simplekv.write_to_table(access_token, table, key, value) -
writes a key-value pair to the given table.
simplekv.rename_table(access_token, old_name, new_name) -
renames the table named 'old_name' to 'new_name'.
Uses the new sink in an example pipeline.
Additionally demonstrates how a sink should be implemented using a
``PTransform``. This is the recommended way to develop sinks that are to be
distributed to a large number of end users.
This method runs two pipelines.
  (1) A pipeline that uses ``SimpleKVSink`` directly using the ``beam.io.Write``
transform.
(2) A pipeline that uses a custom ``PTransform`` that wraps
``SimpleKVSink``.
Args:
simplekv: an object that mocks the key-value storage.
KVs: the set of key-value pairs to be written in the example pipeline.
final_table_name_no_ptransform: the prefix of final set of tables to be
created by the example pipeline that uses
``SimpleKVSink`` directly.
final_table_name_with_ptransform: the prefix of final set of tables to be
created by the example pipeline that uses
a ``PTransform`` that wraps
``SimpleKVSink``.
"""
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.transforms.core import PTransform
from apache_beam.options.pipeline_options import PipelineOptions
# Defining the new sink.
# [START model_custom_sink_new_sink]
class SimpleKVSink(iobase.Sink):
def __init__(self, url, final_table_name):
self._url = url
self._final_table_name = final_table_name
def initialize_write(self):
access_token = simplekv.connect(self._url)
return access_token
def open_writer(self, access_token, uid):
table_name = 'table' + uid
return SimpleKVWriter(access_token, table_name)
def finalize_write(self, access_token, table_names):
for i, table_name in enumerate(table_names):
simplekv.rename_table(
access_token, table_name, self._final_table_name + str(i))
# [END model_custom_sink_new_sink]
# Defining a writer for the new sink.
# [START model_custom_sink_new_writer]
class SimpleKVWriter(iobase.Writer):
def __init__(self, access_token, table_name):
self._access_token = access_token
self._table_name = table_name
self._table = simplekv.open_table(access_token, table_name)
def write(self, record):
key, value = record
simplekv.write_to_table(self._access_token, self._table, key, value)
def close(self):
return self._table_name
# [END model_custom_sink_new_writer]
final_table_name = final_table_name_no_ptransform
# Using the new sink in an example pipeline.
# [START model_custom_sink_use_new_sink]
p = beam.Pipeline(options=PipelineOptions())
kvs = p | 'CreateKVs' >> beam.Create(KVs)
kvs | 'WriteToSimpleKV' >> beam.io.Write(
SimpleKVSink('http://url_to_simple_kv/', final_table_name))
# [END model_custom_sink_use_new_sink]
p.run().wait_until_finish()
# We recommend users to start Sink class names with an underscore to
# discourage using the Sink class directly when a PTransform for the sink is
# available. We simulate that here by simply extending the previous Sink
# class.
class _SimpleKVSink(SimpleKVSink):
pass
# [START model_custom_sink_new_ptransform]
class WriteToKVSink(PTransform):
def __init__(self, url, final_table_name, **kwargs):
super(WriteToKVSink, self).__init__(**kwargs)
self._url = url
self._final_table_name = final_table_name
def expand(self, pcoll):
return pcoll | iobase.Write(_SimpleKVSink(self._url,
self._final_table_name))
# [END model_custom_sink_new_ptransform]
final_table_name = final_table_name_with_ptransform
# [START model_custom_sink_use_ptransform]
p = beam.Pipeline(options=PipelineOptions())
kvs = p | 'CreateKVs' >> beam.core.Create(KVs)
kvs | 'WriteToSimpleKV' >> WriteToKVSink(
'http://url_to_simple_kv/', final_table_name)
# [END model_custom_sink_use_ptransform]
p.run().wait_until_finish()
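# A minimal sketch (illustrative only, not used by the snippets or their tests)
# of an in-memory object that satisfies the simplekv interface assumed by
# model_custom_sink above; the tests inject their own mock.
class _InMemorySimpleKV(object):
  """Toy key-value store exposing the four calls used by SimpleKVSink."""
  def __init__(self):
    self._tables = {}
  def connect(self, url):
    # Return an opaque access token for subsequent calls.
    return 'token-for-%s' % url
  def open_table(self, access_token, table_name):
    self._tables.setdefault(table_name, {})
    return table_name
  def write_to_table(self, access_token, table, key, value):
    self._tables[table][key] = value
  def rename_table(self, access_token, old_name, new_name):
    self._tables[new_name] = self._tables.pop(old_name)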
def model_textio(renames):
"""Using a Read and Write transform to read/write text files."""
def filter_words(x):
import re
return re.findall(r'[A-Za-z\']+', x)
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
# [START model_textio_read]
p = beam.Pipeline(options=PipelineOptions())
# [START model_pipelineio_read]
lines = p | 'ReadFromText' >> beam.io.ReadFromText('path/to/input-*.csv')
# [END model_pipelineio_read]
# [END model_textio_read]
# [START model_textio_write]
filtered_words = lines | 'FilterWords' >> beam.FlatMap(filter_words)
# [START model_pipelineio_write]
filtered_words | 'WriteToText' >> beam.io.WriteToText(
'/path/to/numbers', file_name_suffix='.csv')
# [END model_pipelineio_write]
# [END model_textio_write]
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def model_textio_compressed(renames, expected):
"""Using a Read Transform to read compressed text files."""
p = TestPipeline()
# [START model_textio_write_compressed]
lines = p | 'ReadFromText' >> beam.io.ReadFromText(
'/path/to/input-*.csv.gz',
compression_type=beam.io.filesystem.CompressionTypes.GZIP)
# [END model_textio_write_compressed]
assert_that(lines, equal_to(expected))
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def model_datastoreio():
"""Using a Read and Write transform to read/write to Cloud Datastore."""
import uuid
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
import googledatastore
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
project = 'my_project'
kind = 'my_kind'
query = query_pb2.Query()
query.kind.add().name = kind
# [START model_datastoreio_read]
p = beam.Pipeline(options=PipelineOptions())
entities = p | 'Read From Datastore' >> ReadFromDatastore(project, query)
# [END model_datastoreio_read]
# [START model_datastoreio_write]
p = beam.Pipeline(options=PipelineOptions())
musicians = p | 'Musicians' >> beam.Create(
['Mozart', 'Chopin', 'Beethoven', 'Vivaldi'])
def to_entity(content):
entity = entity_pb2.Entity()
googledatastore.helper.add_key_path(entity.key, kind, str(uuid.uuid4()))
googledatastore.helper.add_properties(entity, {'content': unicode(content)})
return entity
entities = musicians | 'To Entity' >> beam.Map(to_entity)
entities | 'Write To Datastore' >> WriteToDatastore(project)
# [END model_datastoreio_write]
def model_bigqueryio():
"""Using a Read and Write transform to read/write to BigQuery."""
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
# [START model_bigqueryio_read]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadWeatherStations' >> beam.io.Read(
beam.io.BigQuerySource(
'clouddataflow-readonly:samples.weather_stations'))
# [END model_bigqueryio_read]
# [START model_bigqueryio_query]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadYearAndTemp' >> beam.io.Read(
beam.io.BigQuerySource(
query='SELECT year, mean_temp FROM samples.weather_stations'))
# [END model_bigqueryio_query]
# [START model_bigqueryio_query_standard_sql]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadYearAndTemp' >> beam.io.Read(
beam.io.BigQuerySource(
query='SELECT year, mean_temp FROM `samples.weather_stations`',
use_standard_sql=True))
# [END model_bigqueryio_query_standard_sql]
# [START model_bigqueryio_schema]
schema = 'source:STRING, quote:STRING'
# [END model_bigqueryio_schema]
# [START model_bigqueryio_write]
quotes = p | beam.Create(
      [{'source': 'Mahatma Gandhi', 'quote': 'My life is my message.'}])
quotes | 'Write' >> beam.io.Write(
beam.io.BigQuerySink(
'my-project:output.output_table',
schema=schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED))
# [END model_bigqueryio_write]
def model_composite_transform_example(contents, output_path):
"""Example of a composite transform.
To declare a composite transform, define a subclass of PTransform.
  To override the expand method, define a method "expand" that
  takes a PCollection as its only parameter and returns a PCollection.
"""
import re
import apache_beam as beam
# [START composite_transform_example]
# [START composite_ptransform_apply_method]
# [START composite_ptransform_declare]
class CountWords(beam.PTransform):
# [END composite_ptransform_declare]
def expand(self, pcoll):
return (pcoll
| beam.FlatMap(lambda x: re.findall(r'\w+', x))
| beam.combiners.Count.PerElement()
| beam.Map(lambda (word, c): '%s: %s' % (word, c)))
# [END composite_ptransform_apply_method]
# [END composite_transform_example]
p = TestPipeline() # Use TestPipeline for testing.
(p
| beam.Create(contents)
| CountWords()
| beam.io.WriteToText(output_path))
p.run()
def model_multiple_pcollections_flatten(contents, output_path):
"""Merging a PCollection with Flatten."""
some_hash_fn = lambda s: ord(s[0])
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
partition_fn = lambda element, partitions: some_hash_fn(element) % partitions
# Partition into deciles
partitioned = p | beam.Create(contents) | beam.Partition(partition_fn, 3)
pcoll1 = partitioned[0]
pcoll2 = partitioned[1]
pcoll3 = partitioned[2]
# Flatten them back into 1
# A collection of PCollection objects can be represented simply
# as a tuple (or list) of PCollections.
# (The SDK for Python has no separate type to store multiple
# PCollection objects, whether containing the same or different
# types.)
# [START model_multiple_pcollections_flatten]
merged = (
(pcoll1, pcoll2, pcoll3)
# A list of tuples can be "piped" directly into a Flatten transform.
| beam.Flatten())
# [END model_multiple_pcollections_flatten]
merged | beam.io.WriteToText(output_path)
p.run()
def model_multiple_pcollections_partition(contents, output_path):
"""Splitting a PCollection with Partition."""
some_hash_fn = lambda s: ord(s[0])
def get_percentile(i):
"""Assume i in [0,100)."""
return i
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
students = p | beam.Create(contents)
# [START model_multiple_pcollections_partition]
def partition_fn(student, num_partitions):
return int(get_percentile(student) * num_partitions / 100)
by_decile = students | beam.Partition(partition_fn, 10)
# [END model_multiple_pcollections_partition]
# [START model_multiple_pcollections_partition_40th]
fortieth_percentile = by_decile[4]
# [END model_multiple_pcollections_partition_40th]
([by_decile[d] for d in xrange(10) if d != 4] + [fortieth_percentile]
| beam.Flatten()
| beam.io.WriteToText(output_path))
p.run()
def model_group_by_key(contents, output_path):
"""Applying a GroupByKey Transform."""
import re
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
words_and_counts = (
p
| beam.Create(contents)
| beam.FlatMap(lambda x: re.findall(r'\w+', x))
| 'one word' >> beam.Map(lambda w: (w, 1)))
# GroupByKey accepts a PCollection of (w, 1) and
# outputs a PCollection of (w, (1, 1, ...)).
# (A key/value pair is just a tuple in Python.)
# This is a somewhat forced example, since one could
# simply use beam.combiners.Count.PerElement here.
# [START model_group_by_key_transform]
grouped_words = words_and_counts | beam.GroupByKey()
# [END model_group_by_key_transform]
(grouped_words
| 'count words' >> beam.Map(lambda (word, counts): (word, len(counts)))
| beam.io.WriteToText(output_path))
p.run()
def model_co_group_by_key_tuple(email_list, phone_list, output_path):
"""Applying a CoGroupByKey Transform to a tuple."""
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
# [START model_group_by_key_cogroupbykey_tuple]
# Each data set is represented by key-value pairs in separate PCollections.
# Both data sets share a common key type (in this example str).
# The email_list contains values such as: ('joe', '[email protected]') with
# multiple possible values for each key.
  # The phone_list contains values such as: ('mary', '111-222-3333') with
# multiple possible values for each key.
emails = p | 'email' >> beam.Create(email_list)
phones = p | 'phone' >> beam.Create(phone_list)
# The result PCollection contains one key-value element for each key in the
# input PCollections. The key of the pair will be the key from the input and
# the value will be a dictionary with two entries: 'emails' - an iterable of
# all values for the current key in the emails PCollection and 'phones': an
# iterable of all values for the current key in the phones PCollection.
# For instance, if 'emails' contained ('joe', '[email protected]') and
# ('joe', '[email protected]'), then 'result' will contain the element
# ('joe', {'emails': ['[email protected]', '[email protected]'], 'phones': ...})
result = {'emails': emails, 'phones': phones} | beam.CoGroupByKey()
def join_info((name, info)):
return '; '.join(['%s' % name,
'%s' % ','.join(info['emails']),
'%s' % ','.join(info['phones'])])
contact_lines = result | beam.Map(join_info)
# [END model_group_by_key_cogroupbykey_tuple]
contact_lines | beam.io.WriteToText(output_path)
p.run()
def model_join_using_side_inputs(
name_list, email_list, phone_list, output_path):
"""Joining PCollections using side inputs."""
import apache_beam as beam
from apache_beam.pvalue import AsIter
p = TestPipeline() # Use TestPipeline for testing.
# [START model_join_using_side_inputs]
# This code performs a join by receiving the set of names as an input and
# passing PCollections that contain emails and phone numbers as side inputs
# instead of using CoGroupByKey.
names = p | 'names' >> beam.Create(name_list)
emails = p | 'email' >> beam.Create(email_list)
phones = p | 'phone' >> beam.Create(phone_list)
def join_info(name, emails, phone_numbers):
filtered_emails = []
for name_in_list, email in emails:
if name_in_list == name:
filtered_emails.append(email)
filtered_phone_numbers = []
for name_in_list, phone_number in phone_numbers:
if name_in_list == name:
filtered_phone_numbers.append(phone_number)
return '; '.join(['%s' % name,
'%s' % ','.join(filtered_emails),
'%s' % ','.join(filtered_phone_numbers)])
contact_lines = names | 'CreateContacts' >> beam.core.Map(
join_info, AsIter(emails), AsIter(phones))
# [END model_join_using_side_inputs]
contact_lines | beam.io.WriteToText(output_path)
p.run()
# [START model_library_transforms_keys]
class Keys(beam.PTransform):
def expand(self, pcoll):
return pcoll | 'Keys' >> beam.Map(lambda (k, v): k)
# [END model_library_transforms_keys]
# pylint: enable=invalid-name
# [START model_library_transforms_count]
class Count(beam.PTransform):
def expand(self, pcoll):
return (
pcoll
| 'PairWithOne' >> beam.Map(lambda v: (v, 1))
| beam.CombinePerKey(sum))
# [END model_library_transforms_count]
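# Example usage of the library transforms above (illustrative only; these lines
# are not executed here):
#   pairs = p | beam.Create([('a', 1), ('b', 2)])
#   keys = pairs | Keys()     # yields 'a', 'b'
#   words = p | beam.Create(['cat', 'dog', 'cat'])
#   counts = words | Count()  # yields ('cat', 2), ('dog', 1)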
|
|
from __future__ import division, absolute_import, print_function
import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n')+1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
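    # Illustrative examples of the matching above (typical compiler output;
    # exact strings vary by platform and release):
    #   '4.8.2'                           -> ('gfortran', '4.8.2')  # -dumpversion
    #   'GNU Fortran 95 (GCC) 4.0.3 ...'  -> ('gfortran', '4.0.3')
    #   'GNU Fortran (GCC) 3.3.3 ...'     -> ('g77', '3.3.3')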
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
            # Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
filename = sc.get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir,)*4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
# the following code is not needed (read: breaks) when using MinGW
# in case want to link F77 compiled code with MSVC
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
return '-Wl,-rpath="%s"' % dir
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
path = os.path.join(root, target, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def get_flags_opt(self):
if is_win64():
return ['-O0']
else:
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
|
|
"""
The MIT License
Copyright (c) 2008 Gilad Raphaelli <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
### Changelog:
### v1.0.0 - 2012-05-18
### * Brighthouse columnar database "Infobright" module, derived from mysqld module
###
### Requires:
### * yum install Infobright-python
### Copyright Bob Webber, 2012
### License to use, modify, and distribute under the GPL
### http://www.gnu.org/licenses/gpl.txt
import time
import MySQLdb
import logging
descriptors = []
logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(name)s - %(levelname)s\t Thread-%(thread)d - %(message)s", filename='/tmp/infobrightstats.log', filemode='w')
logging.debug('starting up')
last_update = 0
infobright_conn_opts = {}
infobright_stats = {}
infobright_stats_last = {}
delta_per_second = False
REPORT_BRIGHTHOUSE = True
REPORT_BRIGHTHOUSE_ENGINE = False
REPORT_MASTER = True
REPORT_SLAVE = True
MAX_UPDATE_TIME = 15
def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=True, get_slave=True):
"""
"""
logging.debug('updating stats')
global last_update
global infobright_stats, infobright_stats_last
cur_time = time.time()
time_delta = cur_time - last_update
if time_delta <= 0:
#we went backward in time.
logging.debug(" system clock set backwards, probably ntp")
if cur_time - last_update < MAX_UPDATE_TIME:
logging.debug(' wait ' + str(int(MAX_UPDATE_TIME - (cur_time - last_update))) + ' seconds')
return True
else:
last_update = cur_time
logging.debug('refreshing stats')
infobright_stats = {}
# Get info from DB
try:
conn = MySQLdb.connect(**infobright_conn_opts)
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT GET_LOCK('gmetric-infobright', 0) as ok")
lock_stat = cursor.fetchone()
cursor.close()
if lock_stat['ok'] == 0:
return False
# infobright variables have 'brighthouse_ib_' or 'brighthouse_ini_' prefix
cursor = conn.cursor(MySQLdb.cursors.Cursor)
cursor.execute("SHOW VARIABLES")
#variables = dict(((k.lower(), v) for (k,v) in cursor))
variables = {}
for (k,v) in cursor:
variables[k.lower()] = v
cursor.close()
# infobright status values have 'bh_gdc_' or 'bh_mm_' prefix
cursor = conn.cursor(MySQLdb.cursors.Cursor)
# cursor.execute("SHOW /*!50002 GLOBAL */ STATUS")
cursor.execute("SHOW GLOBAL STATUS")
#global_status = dict(((k.lower(), v) for (k,v) in cursor))
global_status = {}
for (k,v) in cursor:
# print k, v
global_status[k.lower()] = v
cursor.close()
# try not to fail ?
# BRIGHTHOUSE ENGINE status variables are pretty obscure
get_brighthouse_engine = get_brighthouse_engine and variables.has_key('brighthouse_ini_controlmessages')
get_master = get_master and variables['log_bin'].lower() == 'on'
if get_brighthouse_engine:
logging.warn('get_brighthouse_engine status not implemented')
        master_logs = ()
if get_master:
cursor = conn.cursor(MySQLdb.cursors.Cursor)
cursor.execute("SHOW MASTER LOGS")
master_logs = cursor.fetchall()
cursor.close()
slave_status = {}
if get_slave:
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SHOW SLAVE STATUS")
res = cursor.fetchone()
if res:
for (k,v) in res.items():
slave_status[k.lower()] = v
else:
get_slave = False
cursor.close()
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT RELEASE_LOCK('gmetric-infobright') as ok")
cursor.close()
conn.close()
except MySQLdb.OperationalError, (errno, errmsg):
logging.error('error updating stats')
logging.error(errmsg)
return False
# process variables
# http://dev.infobright.com/doc/refman/5.0/en/server-system-variables.html
infobright_stats['version'] = variables['version']
infobright_stats['max_connections'] = variables['max_connections']
infobright_stats['query_cache_size'] = variables['query_cache_size']
# process mysql status
# http://www.infobright.com/
interesting_mysql_status_vars = (
'aborted_clients',
'aborted_connects',
'binlog_cache_disk_use',
'binlog_cache_use',
'bytes_received',
'bytes_sent',
'com_delete',
'com_delete_multi',
'com_insert',
'com_insert_select',
'com_load',
'com_replace',
'com_replace_select',
'com_select',
'com_update',
'com_update_multi',
'connections',
'created_tmp_disk_tables',
'created_tmp_files',
'created_tmp_tables',
'key_reads',
'key_read_requests',
'key_writes',
'key_write_requests',
'max_used_connections',
'open_files',
'open_tables',
'opened_tables',
'qcache_free_blocks',
'qcache_free_memory',
'qcache_hits',
'qcache_inserts',
'qcache_lowmem_prunes',
'qcache_not_cached',
'qcache_queries_in_cache',
'qcache_total_blocks',
'questions',
'select_full_join',
'select_full_range_join',
'select_range',
'select_range_check',
'select_scan',
'slave_open_temp_tables',
'slave_retried_transactions',
'slow_launch_threads',
'slow_queries',
'sort_range',
'sort_rows',
'sort_scan',
'table_locks_immediate',
'table_locks_waited',
'threads_cached',
'threads_connected',
'threads_created',
'threads_running',
'uptime',
)
non_delta_mysql_status_vars = (
'max_used_connections',
'open_files',
'open_tables',
'qcache_free_blocks',
'qcache_free_memory',
'qcache_total_blocks',
'slave_open_temp_tables',
'threads_cached',
'threads_connected',
'threads_running',
'uptime'
)
interesting_brighthouse_status_vars = (
'bh_gdc_false_wakeup',
'bh_gdc_hits',
'bh_gdc_load_errors',
'bh_gdc_misses',
'bh_gdc_pack_loads',
'bh_gdc_prefetched',
'bh_gdc_readwait',
'bh_gdc_read_wait_in_progress',
'bh_gdc_redecompress',
        'bh_gdc_released',
'bh_mm_alloc_blocs',
'bh_mm_alloc_objs',
'bh_mm_alloc_pack_size',
'bh_mm_alloc_packs',
'bh_mm_alloc_size',
'bh_mm_alloc_temp',
'bh_mm_alloc_temp_size',
'bh_mm_free_blocks',
'bh_mm_free_pack_size',
'bh_mm_free_packs',
'bh_mm_free_size',
'bh_mm_free_temp',
'bh_mm_free_temp_size',
'bh_mm_freeable',
'bh_mm_release1',
'bh_mm_release2',
'bh_mm_release3',
'bh_mm_release4',
'bh_mm_reloaded',
'bh_mm_scale',
'bh_mm_unfreeable',
'bh_readbytes',
'bh_readcount',
'bh_writebytes',
'bh_writecount',
)
non_delta_brighthouse_status_vars = (
'bh_gdc_read_wait_in_progress',
'bh_mm_alloc_size',
'bh_mm_alloc_temp_size',
'bh_mm_free_pack_size',
'bh_mm_scale',
)
# don't put all of global_status in infobright_stats b/c it's so big
all_interesting_status_vars = interesting_mysql_status_vars + interesting_brighthouse_status_vars
all_non_delta_status_vars = non_delta_mysql_status_vars + non_delta_brighthouse_status_vars
for key in all_interesting_status_vars:
if key in all_non_delta_status_vars:
infobright_stats[key] = global_status[key]
else:
# Calculate deltas for counters
if time_delta <= 0:
                # system clock was set backwards; skip updating deltas to smooth over the graphs
pass
elif key in infobright_stats_last:
if delta_per_second:
infobright_stats[key] = (int(global_status[key]) - int(infobright_stats_last[key])) / time_delta
else:
infobright_stats[key] = int(global_status[key]) - int(infobright_stats_last[key])
else:
infobright_stats[key] = float(0)
infobright_stats_last[key] = global_status[key]
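    # Worked example (illustrative): if bytes_sent was 1000 on the previous poll
    # and is 4000 now with a time_delta of 15 seconds, the stored value is 3000
    # (raw delta), or 200.0 per second when delta_per_second is enabled.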
    infobright_stats['open_files_used'] = float(global_status['open_files']) / float(variables['open_files_limit'])
# process master logs
if get_master:
infobright_stats['binlog_count'] = len(master_logs)
infobright_stats['binlog_space_current'] = master_logs[-1][1]
#infobright_stats['binlog_space_total'] = sum((long(s[1]) for s in master_logs))
infobright_stats['binlog_space_total'] = 0
for s in master_logs:
infobright_stats['binlog_space_total'] += int(s[1])
infobright_stats['binlog_space_used'] = float(master_logs[-1][1]) / float(variables['max_binlog_size']) * 100
# process slave status
if get_slave:
infobright_stats['slave_exec_master_log_pos'] = slave_status['exec_master_log_pos']
#infobright_stats['slave_io'] = 1 if slave_status['slave_io_running'].lower() == "yes" else 0
if slave_status['slave_io_running'].lower() == "yes":
infobright_stats['slave_io'] = 1
else:
infobright_stats['slave_io'] = 0
#infobright_stats['slave_sql'] = 1 if slave_status['slave_sql_running'].lower() =="yes" else 0
if slave_status['slave_sql_running'].lower() == "yes":
infobright_stats['slave_sql'] = 1
else:
infobright_stats['slave_sql'] = 0
infobright_stats['slave_lag'] = slave_status['seconds_behind_master']
infobright_stats['slave_relay_log_pos'] = slave_status['relay_log_pos']
infobright_stats['slave_relay_log_space'] = slave_status['relay_log_space']
logging.debug('success updating stats')
logging.debug('infobright_stats: ' + str(infobright_stats))
def get_stat(name):
logging.info("getting stat: %s" % name)
global infobright_stats
#logging.debug(infobright_stats)
global REPORT_BRIGHTHOUSE
global REPORT_BRIGHTHOUSE_ENGINE
global REPORT_MASTER
global REPORT_SLAVE
ret = update_stats(REPORT_BRIGHTHOUSE, REPORT_BRIGHTHOUSE_ENGINE, REPORT_MASTER, REPORT_SLAVE)
if ret:
if name.startswith('infobright_'):
            # strip the 'infobright_' prefix to recover the key used in infobright_stats
            label = name[len('infobright_'):]
else:
label = name
logging.debug("fetching %s" % name)
try:
return infobright_stats[label]
except:
logging.error("failed to fetch %s" % name)
return 0
else:
return 0
def metric_init(params):
global descriptors
global infobright_conn_opts
global infobright_stats
global delta_per_second
global REPORT_BRIGHTHOUSE
global REPORT_BRIGHTHOUSE_ENGINE
global REPORT_MASTER
global REPORT_SLAVE
REPORT_BRIGHTHOUSE = str(params.get('get_brighthouse', True)) == "True"
REPORT_BRIGHTHOUSE_ENGINE = str(params.get('get_brighthouse_engine', True)) == "True"
REPORT_MASTER = str(params.get('get_master', True)) == "True"
REPORT_SLAVE = str(params.get('get_slave', True)) == "True"
logging.debug("init: " + str(params))
infobright_conn_opts = dict(
user = params.get('user'),
passwd = params.get('passwd'),
unix_socket = params.get('unix_socket', '/tmp/mysql-ib.sock'),
connect_timeout = params.get('timeout', 30),
)
if params.get('host', '') != '':
infobright_conn_opts['host'] = params.get('host')
if params.get('port', 5029) != 5029:
infobright_conn_opts['port'] = params.get('port')
if params.get("delta_per_second", '') != '':
delta_per_second = True
mysql_stats_descriptions = {}
master_stats_descriptions = {}
brighthouse_stats_descriptions = {}
slave_stats_descriptions = {}
mysql_stats_descriptions = dict(
aborted_clients = {
'description': 'The number of connections that were aborted because the client died without closing the connection properly',
'value_type': 'float',
'units': 'clients',
},
aborted_connects = {
'description': 'The number of failed attempts to connect to the Infobright server',
'value_type': 'float',
'units': 'conns',
},
binlog_cache_disk_use = {
'description': 'The number of transactions that used the temporary binary log cache but that exceeded the value of binlog_cache_size and used a temporary file to store statements from the transaction',
'value_type': 'float',
'units': 'txns',
},
binlog_cache_use = {
            'description': 'The number of transactions that used the temporary binary log cache',
'value_type': 'float',
'units': 'txns',
},
bytes_received = {
'description': 'The number of bytes received from all clients',
'value_type': 'float',
'units': 'bytes',
},
bytes_sent = {
            'description': 'The number of bytes sent to all clients',
'value_type': 'float',
'units': 'bytes',
},
com_delete = {
'description': 'The number of DELETE statements',
'value_type': 'float',
'units': 'stmts',
},
com_delete_multi = {
'description': 'The number of multi-table DELETE statements',
'value_type': 'float',
'units': 'stmts',
},
com_insert = {
'description': 'The number of INSERT statements',
'value_type': 'float',
'units': 'stmts',
},
com_insert_select = {
'description': 'The number of INSERT ... SELECT statements',
'value_type': 'float',
'units': 'stmts',
},
com_load = {
'description': 'The number of LOAD statements',
'value_type': 'float',
'units': 'stmts',
},
com_replace = {
'description': 'The number of REPLACE statements',
'value_type': 'float',
'units': 'stmts',
},
com_replace_select = {
'description': 'The number of REPLACE ... SELECT statements',
'value_type': 'float',
'units': 'stmts',
},
com_select = {
'description': 'The number of SELECT statements',
'value_type': 'float',
'units': 'stmts',
},
com_update = {
'description': 'The number of UPDATE statements',
'value_type': 'float',
'units': 'stmts',
},
com_update_multi = {
'description': 'The number of multi-table UPDATE statements',
'value_type': 'float',
'units': 'stmts',
},
connections = {
'description': 'The number of connection attempts (successful or not) to the Infobright server',
'value_type': 'float',
'units': 'conns',
},
created_tmp_disk_tables = {
'description': 'The number of temporary tables on disk created automatically by the server while executing statements',
'value_type': 'float',
'units': 'tables',
},
created_tmp_files = {
            'description': 'The number of temporary files created by the Infobright mysqld server',
'value_type': 'float',
'units': 'files',
},
created_tmp_tables = {
            'description': 'The number of in-memory temporary tables created automatically by the server while executing statements',
'value_type': 'float',
'units': 'tables',
},
#TODO in graphs: key_read_cache_miss_rate = key_reads / key_read_requests
key_read_requests = {
'description': 'The number of requests to read a key block from the cache',
'value_type': 'float',
'units': 'reqs',
},
key_reads = {
'description': 'The number of physical reads of a key block from disk',
'value_type': 'float',
'units': 'reads',
},
key_write_requests = {
'description': 'The number of requests to write a key block to the cache',
'value_type': 'float',
'units': 'reqs',
},
key_writes = {
'description': 'The number of physical writes of a key block to disk',
'value_type': 'float',
'units': 'writes',
},
max_used_connections = {
'description': 'The maximum number of connections that have been in use simultaneously since the server started',
'units': 'conns',
'slope': 'both',
},
open_files = {
'description': 'The number of files that are open',
'units': 'files',
'slope': 'both',
},
open_tables = {
'description': 'The number of tables that are open',
'units': 'tables',
'slope': 'both',
},
# If Opened_tables is big, your table_cache value is probably too small.
opened_tables = {
'description': 'The number of tables that have been opened',
'value_type': 'float',
'units': 'tables',
},
qcache_free_blocks = {
'description': 'The number of free memory blocks in the query cache',
'units': 'blocks',
'slope': 'both',
},
qcache_free_memory = {
'description': 'The amount of free memory for the query cache',
'units': 'bytes',
'slope': 'both',
},
qcache_hits = {
'description': 'The number of query cache hits',
'value_type': 'float',
'units': 'hits',
},
qcache_inserts = {
'description': 'The number of queries added to the query cache',
'value_type': 'float',
'units': 'queries',
},
qcache_lowmem_prunes = {
'description': 'The number of queries that were deleted from the query cache because of low memory',
'value_type': 'float',
'units': 'queries',
},
qcache_not_cached = {
'description': 'The number of non-cached queries (not cacheable, or not cached due to the query_cache_type setting)',
'value_type': 'float',
'units': 'queries',
},
qcache_queries_in_cache = {
'description': 'The number of queries registered in the query cache',
'value_type': 'float',
'units': 'queries',
},
qcache_total_blocks = {
'description': 'The total number of blocks in the query cache',
'units': 'blocks',
},
questions = {
'description': 'The number of statements that clients have sent to the server',
'value_type': 'float',
'units': 'stmts',
},
# If this value is not 0, you should carefully check the indexes of your tables.
select_full_join = {
'description': 'The number of joins that perform table scans because they do not use indexes',
'value_type': 'float',
'units': 'joins',
},
select_full_range_join = {
'description': 'The number of joins that used a range search on a reference table',
'value_type': 'float',
'units': 'joins',
},
select_range = {
'description': 'The number of joins that used ranges on the first table',
'value_type': 'float',
'units': 'joins',
},
# If this is not 0, you should carefully check the indexes of your tables.
select_range_check = {
'description': 'The number of joins without keys that check for key usage after each row',
'value_type': 'float',
'units': 'joins',
},
select_scan = {
'description': 'The number of joins that did a full scan of the first table',
'value_type': 'float',
'units': 'joins',
},
slave_open_temp_tables = {
'description': 'The number of temporary tables that the slave SQL thread currently has open',
'value_type': 'float',
'units': 'tables',
'slope': 'both',
},
slave_retried_transactions = {
'description': 'The total number of times since startup that the replication slave SQL thread has retried transactions',
'value_type': 'float',
'units': 'count',
},
slow_launch_threads = {
'description': 'The number of threads that have taken more than slow_launch_time seconds to create',
'value_type': 'float',
'units': 'threads',
},
slow_queries = {
'description': 'The number of queries that have taken more than long_query_time seconds',
'value_type': 'float',
'units': 'queries',
},
sort_range = {
'description': 'The number of sorts that were done using ranges',
'value_type': 'float',
'units': 'sorts',
},
sort_rows = {
'description': 'The number of sorted rows',
'value_type': 'float',
'units': 'rows',
},
sort_scan = {
'description': 'The number of sorts that were done by scanning the table',
'value_type': 'float',
'units': 'sorts',
},
table_locks_immediate = {
'description': 'The number of times that a request for a table lock could be granted immediately',
'value_type': 'float',
'units': 'count',
},
# If this is high and you have performance problems, you should first optimize your queries, and then either split your table or tables or use replication.
table_locks_waited = {
'description': 'The number of times that a request for a table lock could not be granted immediately and a wait was needed',
'value_type': 'float',
'units': 'count',
},
threads_cached = {
'description': 'The number of threads in the thread cache',
'units': 'threads',
'slope': 'both',
},
threads_connected = {
'description': 'The number of currently open connections',
'units': 'threads',
'slope': 'both',
},
#TODO in graphs: The cache miss rate can be calculated as Threads_created/Connections
# Threads_created is big, you may want to increase the thread_cache_size value.
threads_created = {
'description': 'The number of threads created to handle connections',
'value_type': 'float',
'units': 'threads',
},
threads_running = {
'description': 'The number of threads that are not sleeping',
'units': 'threads',
'slope': 'both',
},
uptime = {
'description': 'The number of seconds that the server has been up',
'units': 'secs',
'slope': 'both',
},
version = {
'description': "Infobright uses MySQL Version",
'value_type': 'string',
'format': '%s',
},
max_connections = {
'description': "The maximum permitted number of simultaneous client connections",
'slope': 'zero',
},
query_cache_size = {
'description': "The amount of memory allocated for caching query results",
'slope': 'zero',
},
)
brighthouse_stats_descriptions = dict(
bh_gdc_read_wait_in_progress = {
'description': "The number of current read waits in Brighthouse tables.",
'slope': 'zero',
},
bh_mm_alloc_size = {
'description': "The Brighthouse memory allocation size.",
'slope': 'zero',
},
bh_mm_alloc_temp_size = {
'description': "Brighthouse memory allocation temp size.",
'slope': 'zero',
},
bh_mm_free_pack_size = {
'description': "Brighthouse memory free pack size.",
'slope': 'zero',
},
bh_mm_scale = {
'description': "Brighthouse memory scale.",
'slope': 'zero',
},
bh_gdc_false_wakeup = {
'description': "BrightHouse gdc false wakeup",
'value_type':'float',
'units': 'fwkups',
'slope': 'both',
},
bh_gdc_hits = {
'description': "BrightHouse gdc hits",
'value_type':'float',
'units': 'hits',
'slope': 'both',
},
bh_gdc_load_errors = {
'description': "BrightHouse gdc load errors",
'value_type':'float',
'units': 'lderrs',
'slope': 'both',
},
bh_gdc_misses = {
'description': "BrightHouse gdc misses",
'value_type':'float',
'units': 'misses',
'slope': 'both',
},
bh_gdc_pack_loads = {
'description': "BrightHouse gdc pack loads",
'value_type':'float',
'units': 'pklds',
'slope': 'both',
},
bh_gdc_prefetched = {
'description': "BrightHouse gdc prefetched",
'value_type':'float',
'units': 'prftchs',
'slope': 'both',
},
# bh_gdc_read_wait_in_progress = {
# 'description': "BrightHouse gdc in read wait",
# 'value_type':'uint',
# 'units': 'inrdwt',
# 'slope': 'both',
# },
bh_gdc_readwait = {
'description': "BrightHouse gdc read waits",
'value_type':'float',
'units': 'rdwts',
'slope': 'both',
},
bh_gdc_redecompress = {
'description': "BrightHouse gdc redecompress",
'value_type':'float',
'units': 'rdcmprs',
'slope': 'both',
},
bh_gdc_released = {
'description': "BrightHouse gdc released",
'value_type':'float',
'units': 'rlss',
'slope': 'both',
},
bh_mm_alloc_blocs = {
'description': "BrightHouse mm allocated blocks",
'value_type':'float',
'units': 'blocks',
'slope': 'both',
},
bh_mm_alloc_objs = {
'description': "BrightHouse mm allocated objects",
'value_type':'float',
'units': 'objs',
'slope': 'both',
},
bh_mm_alloc_pack_size = {
'description': "BrightHouse mm allocated pack size",
'value_type':'float',
'units': 'pksz',
'slope': 'both',
},
bh_mm_alloc_packs = {
'description': "BrightHouse mm allocated packs",
'value_type':'float',
'units': 'packs',
'slope': 'both',
},
bh_mm_alloc_temp = {
'description': "BrightHouse mm allocated temp",
'value_type':'float',
'units': 'temps',
'slope': 'both',
},
bh_mm_free_blocks = {
'description': "BrightHouse mm free blocks",
'value_type':'float',
'units': 'blocks',
'slope': 'both',
},
bh_mm_free_packs = {
'description': "BrightHouse mm free packs",
'value_type':'float',
'units': 'packs',
'slope': 'both',
},
bh_mm_free_size = {
'description': "BrightHouse mm free size",
'value_type':'float',
'units': 'szunits',
'slope': 'both',
},
bh_mm_free_temp = {
'description': "BrightHouse mm free temp",
'value_type':'float',
'units': 'tmps',
'slope': 'both',
},
bh_mm_free_temp_size = {
'description': "BrightHouse mm temp size",
'value_type':'float',
'units': 'tmpunits',
'slope': 'both',
},
bh_mm_freeable = {
'description': "BrightHouse mm freeable",
'value_type':'float',
'units': 'allocunits',
'slope': 'both',
},
bh_mm_release1 = {
'description': "BrightHouse mm release1",
'value_type':'float',
'units': 'relunits',
'slope': 'both',
},
bh_mm_release2 = {
'description': "BrightHouse mm release2",
'value_type':'float',
'units': 'relunits',
'slope': 'both',
},
bh_mm_release3 = {
'description': "BrightHouse mm release3",
'value_type':'float',
'units': 'relunits',
'slope': 'both',
},
bh_mm_release4 = {
'description': "BrightHouse mm release4",
'value_type':'float',
'units': 'relunits',
'slope': 'both',
},
bh_mm_reloaded = {
'description': "BrightHouse mm reloaded",
'value_type':'float',
'units': 'reloads',
'slope': 'both',
},
bh_mm_unfreeable = {
'description': "BrightHouse mm unfreeable",
'value_type':'uint',
'units': 'relunits',
'slope': 'both',
},
bh_readbytes = {
'description': "BrightHouse read bytes",
'value_type':'uint',
'units': 'bytes',
'slope': 'both',
},
bh_readcount = {
'description': "BrightHouse read count",
'value_type':'uint',
'units': 'reads',
'slope': 'both',
},
bh_writebytes = {
'description': "BrightHouse write bytes",
'value_type':'uint',
'units': 'bytes',
'slope': 'both',
},
bh_writecount = {
'description': "BrightHouse write count",
'value_type':'uint',
'units': 'writes',
'slope': 'both',
}
)
if REPORT_MASTER:
master_stats_descriptions = dict(
binlog_count = {
'description': "Number of binary logs",
'units': 'logs',
'slope': 'both',
},
binlog_space_current = {
'description': "Size of current binary log",
'units': 'bytes',
'slope': 'both',
},
binlog_space_total = {
'description': "Total space used by binary logs",
'units': 'bytes',
'slope': 'both',
},
binlog_space_used = {
'description': "Current binary log size / max_binlog_size",
'value_type': 'float',
'units': 'percent',
'slope': 'both',
},
)
if REPORT_SLAVE:
slave_stats_descriptions = dict(
slave_exec_master_log_pos = {
'description': "The position of the last event executed by the SQL thread from the master's binary log",
'units': 'bytes',
'slope': 'both',
},
slave_io = {
'description': "Whether the I/O thread is started and has connected successfully to the master",
'value_type': 'uint8',
'units': 'True/False',
'slope': 'both',
},
slave_lag = {
'description': "Replication Lag",
'units': 'secs',
'slope': 'both',
},
slave_relay_log_pos = {
'description': "The position up to which the SQL thread has read and executed in the current relay log",
'units': 'bytes',
'slope': 'both',
},
slave_sql = {
'description': "Slave SQL Running",
'value_type': 'uint8',
'units': 'True/False',
'slope': 'both',
},
)
update_stats(REPORT_BRIGHTHOUSE, REPORT_BRIGHTHOUSE_ENGINE, REPORT_MASTER, REPORT_SLAVE)
time.sleep(MAX_UPDATE_TIME)
update_stats(REPORT_BRIGHTHOUSE, REPORT_BRIGHTHOUSE_ENGINE, REPORT_MASTER, REPORT_SLAVE)
for stats_descriptions in (brighthouse_stats_descriptions, master_stats_descriptions, mysql_stats_descriptions, slave_stats_descriptions):
for label in stats_descriptions:
if infobright_stats.has_key(label):
format = '%u'
if stats_descriptions[label].has_key('value_type'):
if stats_descriptions[label]['value_type'] == "float":
format = '%f'
d = {
'name': 'infobright_' + label,
'call_back': get_stat,
'time_max': 60,
'value_type': "uint",
'units': "",
'slope': "both",
'format': format,
'description': "http://www.brighthouse.com",
'groups': 'infobright',
}
d.update(stats_descriptions[label])
descriptors.append(d)
else:
logging.error("skipped " + label)
#logging.debug(str(descriptors))
return descriptors
def metric_cleanup():
logging.shutdown()
# pass
if __name__ == '__main__':
from optparse import OptionParser
import os
logging.debug('running from cmd line')
parser = OptionParser()
parser.add_option("-H", "--Host", dest="host", help="Host running Infobright", default="localhost")
parser.add_option("-u", "--user", dest="user", help="user to connect as", default="")
parser.add_option("-p", "--password", dest="passwd", help="password", default="")
parser.add_option("-P", "--port", dest="port", help="port", default=3306, type="int")
parser.add_option("-S", "--socket", dest="unix_socket", help="unix_socket", default="")
parser.add_option("--no-brighthouse", dest="get_brighthouse", action="store_false", default=True)
parser.add_option("--no-brighthouse-engine", dest="get_brighthouse_engine", action="store_false", default=False)
parser.add_option("--no-master", dest="get_master", action="store_false", default=True)
parser.add_option("--no-slave", dest="get_slave", action="store_false", default=True)
parser.add_option("-b", "--gmetric-bin", dest="gmetric_bin", help="path to gmetric binary", default="/usr/bin/gmetric")
parser.add_option("-c", "--gmond-conf", dest="gmond_conf", help="path to gmond.conf", default="/etc/ganglia/gmond.conf")
parser.add_option("-g", "--gmetric", dest="gmetric", help="submit via gmetric", action="store_true", default=False)
parser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False)
(options, args) = parser.parse_args()
metric_init({
'host': options.host,
'passwd': options.passwd,
'user': options.user,
'port': options.port,
'get_brighthouse': options.get_brighthouse,
'get_brighthouse_engine': options.get_brighthouse_engine,
'get_master': options.get_master,
'get_slave': options.get_slave,
'unix_socket': options.unix_socket,
})
for d in descriptors:
v = d['call_back'](d['name'])
if not options.quiet:
print ' %s: %s %s [%s]' % (d['name'], v, d['units'], d['description'])
if options.gmetric:
if d['value_type'] == 'uint':
value_type = 'uint32'
else:
value_type = d['value_type']
cmd = "%s --conf=%s --value='%s' --units='%s' --type='%s' --name='%s' --slope='%s'" % \
(options.gmetric_bin, options.gmond_conf, v, d['units'], value_type, d['name'], d['slope'])
os.system(cmd)
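# The TODO notes in the metric descriptions above suggest two derived ratios for
# graphing: key_read_cache_miss_rate = key_reads / key_read_requests and a thread
# cache miss rate of threads_created / connections. A minimal sketch of deriving
# them from a stats dictionary shaped like infobright_stats follows; the helper
# name and the zero-division guards are illustrative assumptions, not part of the
# collector itself.
def example_cache_miss_rates(stats):
    """Illustrative only: derive cache miss ratios from collected counters."""
    key_reqs = float(stats.get('key_read_requests', 0))
    conns = float(stats.get('connections', 0))
    key_miss_rate = float(stats.get('key_reads', 0)) / key_reqs if key_reqs else 0.0
    thread_miss_rate = float(stats.get('threads_created', 0)) / conns if conns else 0.0
    return key_miss_rate, thread_miss_rate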
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how to perform basic access management with Google Cloud IAM.
For more information, see the documentation at
https://cloud.google.com/iam/docs/granting-changing-revoking-access.
"""
import argparse
import os
from google.oauth2 import service_account
import googleapiclient.discovery
# [START iam_get_policy]
def get_policy(project_id, version=1):
"""Gets IAM policy for a project."""
credentials = service_account.Credentials.from_service_account_file(
filename=os.environ["GOOGLE_APPLICATION_CREDENTIALS"],
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
service = googleapiclient.discovery.build(
"cloudresourcemanager", "v1", credentials=credentials
)
policy = (
service.projects()
.getIamPolicy(
resource=project_id,
body={"options": {"requestedPolicyVersion": version}},
)
.execute()
)
print(policy)
return policy
# [END iam_get_policy]
# [START iam_modify_policy_add_member]
def modify_policy_add_member(policy, role, member):
"""Adds a new member to a role binding."""
binding = next(b for b in policy["bindings"] if b["role"] == role)
binding["members"].append(member)
print(binding)
return policy
# [END iam_modify_policy_add_member]
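# A minimal sketch (with made-up role and member values) of the policy structure
# the helpers here operate on: a dict carrying a "bindings" list, each binding a
# "role" plus its "members". The function name below is illustrative only.
def example_modify_policy_locally():
    policy = {
        "bindings": [
            {"role": "roles/viewer", "members": ["user:[email protected]"]},
        ]
    }
    # Appends the new member to the existing roles/viewer binding in place.
    return modify_policy_add_member(
        policy, "roles/viewer", "user:[email protected]"
    )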
# [START iam_modify_policy_add_role]
def modify_policy_add_role(policy, role, member):
"""Adds a new role binding to a policy."""
binding = {"role": role, "members": [member]}
policy["bindings"].append(binding)
print(policy)
return policy
# [END iam_modify_policy_add_role]
# [START iam_modify_policy_remove_member]
def modify_policy_remove_member(policy, role, member):
"""Removes a member from a role binding."""
binding = next(b for b in policy["bindings"] if b["role"] == role)
if "members" in binding and member in binding["members"]:
binding["members"].remove(member)
print(binding)
return policy
# [END iam_modify_policy_remove_member]
# [START iam_set_policy]
def set_policy(project_id, policy):
"""Sets IAM policy for a project."""
credentials = service_account.Credentials.from_service_account_file(
filename=os.environ["GOOGLE_APPLICATION_CREDENTIALS"],
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
service = googleapiclient.discovery.build(
"cloudresourcemanager", "v1", credentials=credentials
)
policy = (
service.projects()
.setIamPolicy(resource=project_id, body={"policy": policy})
.execute()
)
print(policy)
return policy
# [END iam_set_policy]
# [START iam_test_permissions]
def test_permissions(project_id):
"""Tests IAM permissions of the caller"""
credentials = service_account.Credentials.from_service_account_file(
filename=os.environ["GOOGLE_APPLICATION_CREDENTIALS"],
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
service = googleapiclient.discovery.build(
"cloudresourcemanager", "v1", credentials=credentials
)
permissions = {
"permissions": [
"resourcemanager.projects.get",
"resourcemanager.projects.delete",
]
}
request = service.projects().testIamPermissions(
resource=project_id, body=permissions
)
returnedPermissions = request.execute()
print(returnedPermissions)
return returnedPermissions
# [END iam_test_permissions]
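# Granting access end to end combines the snippets above: read the current
# policy, modify it locally, then write it back. A minimal sketch with
# placeholder role and member values; the function name is illustrative only.
def example_grant_access(project_id):
    policy = get_policy(project_id, version=1)
    # Assumes a binding for the role already exists; modify_policy_add_role
    # would create a new binding otherwise.
    policy = modify_policy_add_member(
        policy, "roles/logging.logWriter", "user:[email protected]"
    )
    return set_policy(project_id, policy)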
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
subparsers = parser.add_subparsers(dest="command")
# Get
get_parser = subparsers.add_parser("get", help=get_policy.__doc__)
get_parser.add_argument("project_id")
    # Modify: add member
    add_member_parser = subparsers.add_parser(
        "add_member", help=modify_policy_add_member.__doc__
    )
    add_member_parser.add_argument("project_id")
    add_member_parser.add_argument("role")
    add_member_parser.add_argument("member")
    # Modify: add role
    add_binding_parser = subparsers.add_parser(
        "add_binding", help=modify_policy_add_role.__doc__
    )
    add_binding_parser.add_argument("project_id")
    add_binding_parser.add_argument("role")
    add_binding_parser.add_argument("member")
    # Modify: remove member
    remove_member_parser = subparsers.add_parser(
        "remove_member", help=modify_policy_remove_member.__doc__
    )
    remove_member_parser.add_argument("project_id")
    remove_member_parser.add_argument("role")
    remove_member_parser.add_argument("member")
# Set
set_parser = subparsers.add_parser("set", help=set_policy.__doc__)
set_parser.add_argument("project_id")
set_parser.add_argument("policy")
# Test permissions
    test_permissions_parser = subparsers.add_parser(
        "test_permissions", help=test_permissions.__doc__
    )
test_permissions_parser.add_argument("project_id")
args = parser.parse_args()
if args.command == "get":
get_policy(args.project_id)
elif args.command == "set":
set_policy(args.project_id, args.policy)
elif args.command == "add_member":
modify_policy_add_member(args.policy, args.role, args.member)
elif args.command == "remove_member":
modify_policy_remove_member(args.policy, args.role, args.member)
elif args.command == "add_binding":
modify_policy_add_role(args.policy, args.role, args.member)
elif args.command == "test_permissions":
test_permissions(args.project_id)
if __name__ == "__main__":
main()
#!/usr/bin/python -Wall
# ================================================================
# Please see LICENSE.txt in the same directory as this file.
# John Kerl
# [email protected]
# 2007-05-31
# ================================================================
# Type module for the group of Pauli matrices.
import re
# sigmax = 0 1
# 1 0
#
# sigmay = 0 -i
# i 0
#
# sigmaz = 1 0
# 0 -1
# ----------------------------------------------------------------
def sanitize1(x):
if (type(x) == type(0)):
return x
elif (type(x) == type(0.0)):
return x
elif (x == x.conjugate()):
return x.real
else:
return x
# ----------------------------------------------------------------
class pauli_t:
def sanitize(self):
self.a = sanitize1(self.a)
self.b = sanitize1(self.b)
self.c = sanitize1(self.c)
self.d = sanitize1(self.d)
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
self.sanitize()
def __mul__(X,Y):
# a b a b
# c d c d
za = X.a*Y.a + X.b*Y.c
zb = X.a*Y.b + X.b*Y.d
zc = X.c*Y.a + X.d*Y.c
zd = X.c*Y.b + X.d*Y.d
Z = pauli_t(za, zb, zc, zd)
return Z
def __eq__(X,Y):
if (X.a != Y.a): return 0
if (X.b != Y.b): return 0
if (X.c != Y.c): return 0
if (X.d != Y.d): return 0
return 1
def __ne__(X,Y):
return not (X == Y)
def __lt__(X,Y):
if (X.a < Y.a): return 0
if (X.b < Y.b): return 0
if (X.c < Y.c): return 0
if (X.d < Y.d): return 0
return 1
def __le__(X,Y):
if (X.a <= Y.a): return 0
if (X.b <= Y.b): return 0
if (X.c <= Y.c): return 0
if (X.d <= Y.d): return 0
return 1
def __gt__(X,Y):
if (X.a > Y.a): return 0
if (X.b > Y.b): return 0
if (X.c > Y.c): return 0
if (X.d > Y.d): return 0
return 1
def __ge__(X,Y):
if (X.a >= Y.a): return 0
if (X.b >= Y.b): return 0
if (X.c >= Y.c): return 0
if (X.d >= Y.d): return 0
return 1
def inv(X):
det = X.a*X.d - X.b*X.c
Z = pauli_t(X.d/det, -X.b/det, -X.c/det, X.a/det)
return Z
# xxx stub
def scan(self, string):
if (string == "I"):
self.__init__(1,0,0,1)
elif (string == "sx"):
self.__init__(0,1,1,0)
elif (string == "sy"):
self.__init__(0,-1j,1j,0)
elif (string == "sz"):
self.__init__(1,0,0,-1)
# parse on slashes ...
else:
raise IOError
def __str__(self):
return str(self.a) + "/" + str(self.b) + "/" + str(self.c) + "/" + str(self.d)
def __repr__(self):
return self.__str__()
def params_from_string(params_string):
# xxx check empty
return 0
def from_string(value_string, params_string):
not_used = params_from_string(params_string)
obj = pauli_t(0,0,0,0)
obj.scan(value_string)
return obj
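# A quick illustrative check of the algebra spelled out in the header comment:
# each sigma squares to the identity and sigma_x * sigma_y equals i * sigma_z.
# The function name below is only an example and is not used elsewhere.
def example_pauli_identities():
    ident = from_string("I", "")
    sx = from_string("sx", "")
    sy = from_string("sy", "")
    sz = from_string("sz", "")
    squares_to_identity = (sx*sx == ident) and (sy*sy == ident) and (sz*sz == ident)
    xy_is_i_sz = (sx*sy == pauli_t(1j, 0, 0, -1j))  # i * sigma_z
    return squares_to_identity and xy_is_i_sz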
## ----------------------------------------------------------------
#from sackgrp import *
#X=from_string("sx",""); print X
#Y=from_string("sy",""); print Y
#Z=from_string("sz",""); print Z
#XX=X*X;print XX
#YY=Y*Y;print YY
#ZZ=Z*Z;print ZZ
#print
#G=[X,Y,Z]
#close_group(G)
#for g in G:
# print g
#print
#print_cayley_table(G)
#print
#orders = get_orders(G)
#n = len(G)
#for k in range(0, n):
# print G[k], orders[k]
# ================================================================
import unittest
if __name__ == '__main__':
class test_cases(unittest.TestCase):
def test_sanitize1(self):
pass # to be implemented
def test_sanitize(self):
pass # to be implemented
def test___init__(self):
pass # to be implemented
def test___mul__(self):
pass # to be implemented
def test___eq__(self):
pass # to be implemented
def test___ne__(self):
pass # to be implemented
def test___lt__(self):
pass # to be implemented
def test___le__(self):
pass # to be implemented
def test___gt__(self):
pass # to be implemented
def test___ge__(self):
pass # to be implemented
def test_inv(self):
pass # to be implemented
def test_scan(self):
pass # to be implemented
def test___str__(self):
pass # to be implemented
def test___repr__(self):
pass # to be implemented
def test_params_from_string(self):
pass # to be implemented
def test_from_string(self):
pass # to be implemented
# ----------------------------------------------------------------
unittest.main()
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Daniel Drizhuk, [email protected], 2017
# - Mario Lassnig, [email protected], 2017
# - Paul Nilsson, [email protected], 2017-2020
import collections
import subprocess # Python 2/3
try:
import commands # Python 2
except Exception:
pass
import json
import os
import platform
import ssl
import sys
try:
import urllib.request # Python 3
import urllib.error # Python 3
import urllib.parse # Python 3
except Exception:
import urllib # Python 2
import urllib2 # Python 2
import pipes
from .filehandling import write_file
from .auxiliary import is_python3
from .config import config
from .constants import get_pilot_version
import logging
logger = logging.getLogger(__name__)
_ctx = collections.namedtuple('_ctx', 'ssl_context user_agent capath cacert')
# anisyonk: public copy of `_ctx` to avoid logic break since ssl_context is reset inside the request() -- FIXME
# anisyonk: public instance, should be properly initialized by `https_setup()`
# anisyonk: use lightweight class definition instead of namedtuple since tuple is immutable and we don't need/use any tuple features here
ctx = type('ctx', (object,), dict(ssl_context=None, user_agent='Pilot2 client', capath=None, cacert=None))
def _tester(func, *args):
"""
Tests function ``func`` on arguments and returns first positive.
>>> _tester(lambda x: x%3 == 0, 1, 2, 3, 4, 5, 6)
3
>>> _tester(lambda x: x%3 == 0, 1, 2)
None
:param func: function(arg)->boolean
:param args: other arguments
:return: something or none
"""
for arg in args:
if arg is not None and func(arg):
return arg
return None
def capath(args=None):
"""
    Tries to get the :abbr:`CA (Certification Authority)` path with certificates.
    Verifies that it is a directory.
    Tries the following locations:
1. :option:`--capath` from arguments
2. :envvar:`X509_CERT_DIR` from env
3. Path ``/etc/grid-security/certificates``
:param args: arguments, parsed by `argparse`
:returns: `str` -- directory path, or `None`
"""
return _tester(os.path.isdir,
args and args.capath,
os.environ.get('X509_CERT_DIR'),
'/etc/grid-security/certificates')
def cacert_default_location():
"""
    Tries to get the current user ID through `os.getuid` and builds the default POSIX path for the x509 proxy certificate.
:returns: `str` -- posix default x509 path, or `None`
"""
try:
return '/tmp/x509up_u%s' % str(os.getuid())
except AttributeError:
logger.warn('No UID available? System not POSIX-compatible... trying to continue')
pass
return None
def cacert(args=None):
"""
    Tries to get the :abbr:`CA (Certification Authority)` certificate or X509 user proxy.
    Verifies that it is a regular file.
    Tries the following locations:
1. :option:`--cacert` from arguments
2. :envvar:`X509_USER_PROXY` from env
3. Path ``/tmp/x509up_uXXX``, where ``XXX`` refers to ``UID``
:param args: arguments, parsed by `argparse`
:returns: `str` -- certificate file path, or `None`
"""
return _tester(os.path.isfile,
args and args.cacert,
os.environ.get('X509_USER_PROXY'),
cacert_default_location())
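# A small usage sketch for the two resolvers above, assuming an argparse-style
# namespace; the attribute values are placeholders. Each resolver returns the
# first candidate that actually exists on the filesystem, or None.
def example_resolve_certificates():
    import argparse
    args = argparse.Namespace(capath='/etc/grid-security/certificates',
                              cacert='/tmp/x509up_u12345')
    return capath(args), cacert(args)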
def https_setup(args=None, version=None):
"""
Sets up the context for future HTTPS requests:
1. Selects the certificate paths
2. Sets up :mailheader:`User-Agent`
3. Tries to create `ssl.SSLContext` for future use (falls back to :command:`curl` if fails)
:param args: arguments, parsed by `argparse`
:param str version: pilot version string (for :mailheader:`User-Agent`)
"""
version = version or get_pilot_version()
_ctx.user_agent = 'pilot/%s (Python %s; %s %s)' % (version,
sys.version.split()[0],
platform.system(),
platform.machine())
logger.debug('User-Agent: %s' % _ctx.user_agent)
_ctx.capath = capath(args)
_ctx.cacert = cacert(args)
if sys.version_info < (2, 7, 9): # by anisyonk: actually SSL context should work, but prior to 2.7.9 there is no automatic hostname/certificate validation
logger.warn('Python version <2.7.9 lacks SSL contexts -- falling back to curl')
_ctx.ssl_context = None
else:
try:
_ctx.ssl_context = ssl.create_default_context(capath=_ctx.capath,
cafile=_ctx.cacert)
except Exception as e:
logger.warn('SSL communication is impossible due to SSL error: %s -- falling back to curl' % str(e))
_ctx.ssl_context = None
# anisyonk: clone `_ctx` to avoid logic break since ssl_context is reset inside the request() -- FIXME
ctx.capath = _ctx.capath
ctx.cacert = _ctx.cacert
ctx.user_agent = _ctx.user_agent
try:
ctx.ssl_context = ssl.create_default_context(capath=ctx.capath, cafile=ctx.cacert)
ctx.ssl_context.load_cert_chain(ctx.cacert)
    except Exception as e:  # redundant try-catch protection, should work well for both python2 & python3 -- CLEAN ME later (anisyonk)
logger.warn('Failed to initialize SSL context .. skipped, error: %s' % str(e))
def request(url, data=None, plain=False, secure=True):
"""
This function sends a request using HTTPS.
Sends :mailheader:`User-Agent` and certificates previously being set up by `https_setup`.
If `ssl.SSLContext` is available, uses `urllib2` as a request processor. Otherwise uses :command:`curl`.
    If ``data`` is provided, encodes it as URL form data and sends it to the server.
Treats the request as JSON unless a parameter ``plain`` is `True`.
If JSON is expected, sends ``Accept: application/json`` header.
:param string url: the URL of the resource
:param dict data: data to send
    :param boolean plain: if true, treats the response as plain text.
:param secure: Boolean (default: True, ie use certificates)
Usage:
.. code-block:: python
:emphasize-lines: 2
https_setup(args, PILOT_VERSION) # sets up ssl and other stuff
response = request('https://some.url', {'some':'data'})
Returns:
- :keyword:`dict` -- if everything went OK
- `str` -- if ``plain`` parameter is `True`
- `None` -- if something went wrong
"""
_ctx.ssl_context = None # certificates are not available on the grid, use curl
logger.debug('server update dictionary = \n%s' % str(data))
# get the filename and strdata for the curl config file
filename, strdata = get_vars(url, data)
# write the strdata to file
writestatus = write_file(filename, strdata)
# get the config option for the curl command
dat = get_curl_config_option(writestatus, url, data, filename)
if _ctx.ssl_context is None and secure:
req = get_curl_command(plain, dat)
try:
status, output = execute_request(req)
except Exception as e:
logger.warning('exception: %s' % e)
return None
else:
if status != 0:
logger.warn('request failed (%s): %s' % (status, output))
return None
# return output if plain otherwise return json.loads(output)
if plain:
return output
else:
try:
ret = json.loads(output)
except Exception as e:
logger.warning('json.loads() failed to parse output=%s: %s' % (output, e))
return None
else:
return ret
else:
req = execute_urllib(url, data, plain, secure)
context = _ctx.ssl_context if secure else None
if is_python3(): # Python 3
ec, output = get_urlopen_output(req, context)
if ec:
return None
else: # Python 2
ec, output = get_urlopen2_output(req, context)
if ec:
return None
return output.read() if plain else json.load(output)
def get_curl_command(plain, dat):
"""
Get the curl command.
:param plain:
:param dat: curl config option (string).
:return: curl command (string).
"""
req = 'curl -sS --compressed --connect-timeout %s --max-time %s '\
'--capath %s --cert %s --cacert %s --key %s '\
'-H %s %s %s' % (config.Pilot.http_connect_timeout, config.Pilot.http_maxtime,
pipes.quote(_ctx.capath or ''), pipes.quote(_ctx.cacert or ''),
pipes.quote(_ctx.cacert or ''), pipes.quote(_ctx.cacert or ''),
pipes.quote('User-Agent: %s' % _ctx.user_agent),
"-H " + pipes.quote('Accept: application/json') if not plain else '',
dat)
logger.info('request: %s' % req)
return req
def get_vars(url, data):
"""
Get the filename and strdata for the curl config file.
:param url: URL (string).
:param data: data to be written to file (dictionary).
:return: filename (string), strdata (string).
"""
strdata = ""
for key in data:
try:
strdata += 'data="%s"\n' % urllib.parse.urlencode({key: data[key]}) # Python 3
except Exception:
strdata += 'data="%s"\n' % urllib.urlencode({key: data[key]}) # Python 2
jobid = ''
if 'jobId' in list(data.keys()): # Python 2/3
jobid = '_%s' % data['jobId']
# write data to temporary config file
filename = '%s/curl_%s%s.config' % (os.getenv('PILOT_HOME'), os.path.basename(url), jobid)
return filename, strdata
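# For reference, a brief sketch of what the generated curl config data looks like
# for a small payload; the URL and values below are made up.
def example_curl_config_lines():
    url = 'https://pandaserver.example.org:25443/server/panda/updateJob'
    data = {'jobId': 1234, 'state': 'running'}
    filename, strdata = get_vars(url, data)
    # strdata now holds one URL-encoded 'data="..."' line per key, e.g.
    #   data="jobId=1234"
    #   data="state=running"
    # and filename points at $PILOT_HOME/curl_updateJob_1234.config
    return filename, strdata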
def get_curl_config_option(writestatus, url, data, filename):
"""
Get the curl config option.
:param writestatus: status of write_file call (Boolean).
:param url: URL (string).
:param data: data structure (dictionary).
:param filename: file name of config file (string).
:return: config option (string).
"""
if not writestatus:
logger.warning('failed to create curl config file (will attempt to urlencode data directly)')
try:
dat = pipes.quote(url + '?' + urllib.parse.urlencode(data) if data else '') # Python 3
except Exception:
dat = pipes.quote(url + '?' + urllib.urlencode(data) if data else '') # Python 2
else:
dat = '--config %s %s' % (filename, url)
return dat
def execute_request(req):
"""
Execute the curl request.
:param req: curl request command (string).
:return: status (int), output (string).
"""
try:
status, output = subprocess.getstatusoutput(req) # Python 3
except Exception:
status, output = commands.getstatusoutput(req) # Python 2
return status, output
def execute_urllib(url, data, plain, secure):
"""
Execute the request using urllib.
:param url: URL (string).
:param data: data structure
:return: urllib request structure.
"""
try:
req = urllib.request.Request(url, urllib.parse.urlencode(data)) # Python 3
except Exception:
req = urllib2.Request(url, urllib.urlencode(data)) # Python 2
if not plain:
req.add_header('Accept', 'application/json')
if secure:
req.add_header('User-Agent', _ctx.user_agent)
return req
def get_urlopen_output(req, context):
"""
Get the output from the urlopen request.
:param req:
:param context:
:return: ec (int), output (string).
"""
ec = -1
output = ""
try:
output = urllib.request.urlopen(req, context=context)
except urllib.error.HTTPError as e:
logger.warn('server error (%s): %s' % (e.code, e.read()))
except urllib.error.URLError as e:
logger.warn('connection error: %s' % e.reason)
else:
ec = 0
return ec, output
def get_urlopen2_output(req, context):
"""
Get the output from the urlopen2 request.
:param req:
:param context:
:return: ec (int), output (string).
"""
ec = -1
output = ""
try:
output = urllib2.urlopen(req, context=context)
except urllib2.HTTPError as e:
logger.warn('server error (%s): %s' % (e.code, e.read()))
except urllib2.URLError as e:
logger.warn('connection error: %s' % e.reason)
else:
ec = 0
return ec, output
# -*- coding: utf-8 -*-
'''
mirrormanager2 tests.
'''
__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources
import unittest
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import mirrormanager2.lib
import tests
class MMLibtests(tests.Modeltests):
""" Collection tests. """
def test_query_directories(self):
""" Test the query_directories function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.query_directories(self.session)
self.assertEqual(len(results), 0)
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_hostcategory(self.session)
tests.create_hostcategoryurl(self.session)
tests.create_categorydirectory(self.session)
results = mirrormanager2.lib.query_directories(self.session)
self.assertEqual(len(results), 12)
def test_get_site(self):
""" Test the get_site function of mirrormanager2.lib. """
tests.create_site(self.session)
results = mirrormanager2.lib.get_site(self.session, 0)
self.assertEqual(results, None)
results = mirrormanager2.lib.get_site(self.session, 1)
self.assertEqual(results.name, 'test-mirror')
self.assertEqual(results.private, False)
self.assertEqual(results.created_by, 'pingou')
results = mirrormanager2.lib.get_site(self.session, 2)
self.assertEqual(results.name, 'test-mirror2')
self.assertEqual(results.private, False)
self.assertEqual(results.created_by, 'kevin')
results = mirrormanager2.lib.get_site(self.session, 3)
self.assertEqual(results.name, 'test-mirror_private')
self.assertEqual(results.private, True)
self.assertEqual(results.created_by, 'skvidal')
def test_get_site_by_name(self):
""" Test the get_site_by_name function of mirrormanager2.lib. """
tests.create_site(self.session)
results = mirrormanager2.lib.get_site_by_name(self.session, 'foo')
self.assertEqual(results, None)
results = mirrormanager2.lib.get_site_by_name(
self.session, 'test-mirror')
self.assertEqual(results.name, 'test-mirror')
self.assertEqual(results.private, False)
self.assertEqual(results.created_by, 'pingou')
results = mirrormanager2.lib.get_site_by_name(
self.session, 'test-mirror2')
self.assertEqual(results.name, 'test-mirror2')
self.assertEqual(results.private, False)
self.assertEqual(results.created_by, 'kevin')
def test_get_all_sites(self):
""" Test the get_all_sites function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_all_sites(self.session)
self.assertEqual(results, [])
tests.create_site(self.session)
results = mirrormanager2.lib.get_all_sites(self.session)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'test-mirror')
self.assertEqual(results[1].name, 'test-mirror2')
self.assertEqual(results[2].name, 'test-mirror_private')
def test_get_siteadmin(self):
""" Test the get_siteadmin function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_siteadmin(self.session, 1)
self.assertEqual(results, None)
tests.create_site(self.session)
results = mirrormanager2.lib.get_siteadmin(self.session, 1)
self.assertEqual(results, None)
tests.create_site_admin(self.session)
results = mirrormanager2.lib.get_siteadmin(self.session, 1)
self.assertEqual(results.site.name, 'test-mirror')
self.assertEqual(results.username, 'ralph')
results = mirrormanager2.lib.get_siteadmin(self.session, 4)
self.assertEqual(results.site.name, 'test-mirror2')
self.assertEqual(results.username, 'pingou')
def test_get_host(self):
""" Test the get_host function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_host(self.session, 1)
self.assertEqual(results, None)
tests.create_site(self.session)
tests.create_hosts(self.session)
results = mirrormanager2.lib.get_host(self.session, 1)
self.assertEqual(results.name, 'mirror.localhost')
self.assertEqual(results.country, 'US')
results = mirrormanager2.lib.get_host(self.session, 2)
self.assertEqual(results.name, 'mirror2.localhost')
self.assertEqual(results.country, 'FR')
results = mirrormanager2.lib.get_host(self.session, 3)
self.assertEqual(results.name, 'private.localhost')
self.assertEqual(results.country, 'NL')
def test_get_hosts(self):
""" Test the get_hosts function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_hosts(self.session)
self.assertEqual(results, [])
tests.create_site(self.session)
tests.create_hosts(self.session)
results = mirrormanager2.lib.get_hosts(self.session)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror.localhost')
self.assertEqual(results[0].country, 'US')
self.assertEqual(results[1].name, 'mirror2.localhost')
self.assertEqual(results[1].country, 'FR')
self.assertEqual(results[2].name, 'private.localhost')
self.assertEqual(results[2].country, 'NL')
def test_get_host_acl_ip(self):
""" Test the get_host_acl_ip function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_host_acl_ip(self.session, 1)
self.assertEqual(results, None)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_hostaclip(self.session)
results = mirrormanager2.lib.get_host_acl_ip(self.session, 1)
self.assertEqual(results.host.name, 'mirror.localhost')
self.assertEqual(results.host.country, 'US')
results = mirrormanager2.lib.get_host_acl_ip(self.session, 2)
self.assertEqual(results.host.name, 'mirror2.localhost')
self.assertEqual(results.host.country, 'FR')
def test_get_host_netblock(self):
""" Test the get_host_netblock function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_host_netblock(self.session, 1)
self.assertEqual(results, None)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_hostnetblock(self.session)
results = mirrormanager2.lib.get_host_netblock(self.session, 1)
self.assertEqual(results.host.name, 'private.localhost')
self.assertEqual(results.host.country, 'NL')
results = mirrormanager2.lib.get_host_netblock(self.session, 2)
self.assertEqual(results, None)
def test_get_host_peer_asn(self):
""" Test the get_host_peer_asn function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_host_peer_asn(self.session, 1)
self.assertEqual(results, None)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_hostpeerasn(self.session)
results = mirrormanager2.lib.get_host_peer_asn(self.session, 1)
self.assertEqual(results.host.name, 'private.localhost')
self.assertEqual(results.host.country, 'NL')
results = mirrormanager2.lib.get_host_peer_asn(self.session, 2)
self.assertEqual(results, None)
def test_get_host_country(self):
""" Test the get_host_country function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_host_country(self.session, 1)
self.assertEqual(results, None)
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_hostcountry(self.session)
results = mirrormanager2.lib.get_host_country(self.session, 1)
self.assertEqual(results.host.name, 'mirror.localhost')
self.assertEqual(results.host.country, 'US')
results = mirrormanager2.lib.get_host_country(self.session, 2)
self.assertEqual(results.host.name, 'mirror2.localhost')
self.assertEqual(results.host.country, 'FR')
results = mirrormanager2.lib.get_host_country(self.session, 3)
self.assertEqual(results, None)
def test_get_host_category(self):
""" Test the get_host_category function of mirrormanager2.lib. """
results = mirrormanager2.lib.get_host_category(self.session, 1)
self.assertEqual(results, None)
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_hostcategory(self.session)
results = mirrormanager2.lib.get_host_category(self.session, 1)
self.assertEqual(results.host.name, 'mirror.localhost')
self.assertEqual(results.host.country, 'US')
results = mirrormanager2.lib.get_host_category(self.session, 2)
self.assertEqual(results.host.name, 'mirror.localhost')
self.assertEqual(results.host.country, 'US')
results = mirrormanager2.lib.get_host_category(self.session, 3)
self.assertEqual(results.host.name, 'mirror2.localhost')
self.assertEqual(results.host.country, 'FR')
results = mirrormanager2.lib.get_host_category(self.session, 4)
self.assertEqual(results.host.name, 'mirror2.localhost')
self.assertEqual(results.host.country, 'FR')
results = mirrormanager2.lib.get_host_category(self.session, 5)
self.assertEqual(results, None)
def test_get_host_category_by_hostid_category(self):
""" Test the get_host_category_by_hostid_category function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_host_category_by_hostid_category(
self.session, 1, 'Fedora Linux')
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_hostcategory(self.session)
results = mirrormanager2.lib.get_host_category_by_hostid_category(
self.session, 1, 'Fedora Linux')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].host.name, 'mirror.localhost')
self.assertEqual(results[0].host.country, 'US')
results = mirrormanager2.lib.get_host_category_by_hostid_category(
self.session, 2, 'Fedora Linux')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].host.name, 'mirror2.localhost')
self.assertEqual(results[0].host.country, 'FR')
results = mirrormanager2.lib.get_host_category_by_hostid_category(
self.session, 3, 'Fedora Linux')
self.assertEqual(results, [])
def test_get_host_category_url_by_id(self):
""" Test the get_host_category_url_by_id function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_host_category_url_by_id(
self.session, 1)
self.assertEqual(results, None)
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_hostcategory(self.session)
tests.create_hostcategoryurl(self.session)
for i in range(4):
results = mirrormanager2.lib.get_host_category_url_by_id(
self.session, i+1)
self.assertEqual(
results.host_category.host.name, 'mirror.localhost')
self.assertEqual(
results.host_category.host.country, 'US')
results = mirrormanager2.lib.get_host_category_url_by_id(
self.session, 5)
self.assertEqual(results, None)
def test_get_host_category_url(self):
""" Test the get_host_category_url function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_host_category_url(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_hostcategory(self.session)
tests.create_hostcategoryurl(self.session)
results = mirrormanager2.lib.get_host_category_url(self.session)
self.assertEqual(len(results), 4)
for i in range(4):
self.assertEqual(
results[i].host_category.host.name, 'mirror.localhost')
self.assertEqual(
results[i].host_category.host.country, 'US')
def test_get_country_by_name(self):
""" Test the get_country_by_name function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_country_by_name(self.session, 'FR')
self.assertEqual(results, None)
tests.create_base_items(self.session)
for i in ['FR', 'US']:
results = mirrormanager2.lib.get_country_by_name(
self.session, i)
self.assertEqual(results.code, i)
results = mirrormanager2.lib.get_country_by_name(self.session, 'BE')
self.assertEqual(results, None)
def test_get_country_continent_redirect(self):
""" Test the get_country_continent_redirect function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_country_continent_redirect(
self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_country_continent_redirect(
self.session)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].country, 'IL')
self.assertEqual(results[0].continent, 'EU')
self.assertEqual(results[1].country, 'AM')
self.assertEqual(results[1].continent, 'EU')
self.assertEqual(results[2].country, 'JO')
self.assertEqual(results[2].continent, 'EU')
def test_get_user_by_username(self):
""" Test the get_user_by_username function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_user_by_username(
self.session, 'pingou')
self.assertEqual(results, None)
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_user_by_username(
self.session, 'pingou')
self.assertEqual(results.user_name, 'pingou')
self.assertEqual(results.email_address, '[email protected]')
results = mirrormanager2.lib.get_user_by_username(
self.session, 'ralph')
self.assertEqual(results.user_name, 'ralph')
self.assertEqual(results.email_address, '[email protected]')
results = mirrormanager2.lib.get_user_by_username(
self.session, 'foo')
self.assertEqual(results, None)
def test_get_user_by_email(self):
""" Test the get_user_by_email function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_user_by_email(
self.session, '[email protected]')
self.assertEqual(results, None)
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_user_by_email(
self.session, '[email protected]')
self.assertEqual(results.user_name, 'pingou')
self.assertEqual(results.email_address, '[email protected]')
results = mirrormanager2.lib.get_user_by_email(
self.session, '[email protected]')
self.assertEqual(results.user_name, 'ralph')
self.assertEqual(results.email_address, '[email protected]')
results = mirrormanager2.lib.get_user_by_email(
self.session, 'foo')
self.assertEqual(results, None)
def test_get_user_by_token(self):
""" Test the get_user_by_token function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_user_by_token(
self.session, 'bar')
self.assertEqual(results, None)
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_user_by_token(
self.session, 'bar')
self.assertEqual(results.user_name, 'shaiton')
self.assertEqual(results.email_address, '[email protected]')
results = mirrormanager2.lib.get_user_by_token(
self.session, 'foo')
self.assertEqual(results, None)
def test_get_session_by_visitkey(self):
""" Test the get_session_by_visitkey function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_session_by_visitkey(
self.session, 'foo')
self.assertEqual(results, None)
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_session_by_visitkey(
self.session, 'foo')
self.assertEqual(results.user.user_name, 'pingou')
self.assertEqual(results.user.email_address, '[email protected]')
self.assertEqual(results.user_ip, '127.0.0.1')
results = mirrormanager2.lib.get_session_by_visitkey(
self.session, 'bar')
self.assertEqual(results, None)
def test_get_version_by_name_version(self):
""" Test the get_version_by_name_version function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_version_by_name_version(
self.session, 'Fedora', '21')
self.assertEqual(results, None)
tests.create_base_items(self.session)
tests.create_version(self.session)
results = mirrormanager2.lib.get_version_by_name_version(
self.session, 'Fedora', 21)
self.assertEqual(results.product.name, 'Fedora')
self.assertEqual(results.name, '21')
results = mirrormanager2.lib.get_version_by_name_version(
self.session, 'Fedora', '21-alpha')
self.assertEqual(results.product.name, 'Fedora')
self.assertEqual(results.name, '21-alpha')
self.assertEqual(results.is_test, True)
results = mirrormanager2.lib.get_session_by_visitkey(
self.session, 'bar')
self.assertEqual(results, None)
def test_get_versions(self):
""" Test the get_versions function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_versions(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_version(self.session)
results = mirrormanager2.lib.get_versions(self.session)
self.assertEqual(len(results), 6)
self.assertEqual(results[0].product.name, 'Fedora')
self.assertEqual(results[0].name, '20')
self.assertEqual(results[1].product.name, 'Fedora')
self.assertEqual(results[1].name, '21-alpha')
self.assertEqual(results[2].product.name, 'Fedora')
self.assertEqual(results[2].name, '21')
def test_get_arch_by_name(self):
""" Test the get_arch_by_name function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_arch_by_name(self.session, 'i386')
self.assertEqual(results, None)
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_arch_by_name(self.session, 'i386')
self.assertEqual(results.name, 'i386')
results = mirrormanager2.lib.get_arch_by_name(self.session, 'i686')
self.assertEqual(results, None)
def test_get_categories(self):
""" Test the get_categories function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_categories(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
results = mirrormanager2.lib.get_categories(self.session)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].name, 'Fedora Linux')
self.assertEqual(results[0].product.name, 'Fedora')
self.assertEqual(results[1].name, 'Fedora EPEL')
self.assertEqual(results[1].product.name, 'EPEL')
def test_get_category_by_name(self):
""" Test the get_category_by_name function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_category_by_name(
self.session, 'Fedora EPEL')
self.assertEqual(results, None)
tests.create_base_items(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
results = mirrormanager2.lib.get_category_by_name(
self.session, 'Fedora EPEL')
self.assertEqual(results.name, 'Fedora EPEL')
self.assertEqual(results.product.name, 'EPEL')
results = mirrormanager2.lib.get_category_by_name(
self.session, 'Fedora Linux')
self.assertEqual(results.name, 'Fedora Linux')
self.assertEqual(results.product.name, 'Fedora')
results = mirrormanager2.lib.get_category_by_name(
self.session, 'foo')
self.assertEqual(results, None)
def test_get_category_directory(self):
""" Test the get_category_directory function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_category_directory(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_categorydirectory(self.session)
results = mirrormanager2.lib.get_category_directory(self.session)
self.assertEqual(len(results), 4)
self.assertEqual(
results[0].category.name, 'Fedora Linux')
self.assertEqual(
results[0].directory.name, 'pub/fedora/linux/releases')
self.assertEqual(
results[1].category.name, 'Fedora EPEL')
self.assertEqual(
results[1].directory.name, 'pub/epel')
def test_get_product_by_name(self):
""" Test the get_product_by_name function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_product_by_name(
self.session, 'Fedora')
self.assertEqual(results, None)
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_product_by_name(
self.session, 'Fedora')
self.assertEqual(results.name, 'Fedora')
results = mirrormanager2.lib.get_product_by_name(
self.session, 'EPEL')
self.assertEqual(results.name, 'EPEL')
results = mirrormanager2.lib.get_product_by_name(
self.session, 'foo')
self.assertEqual(results, None)
def test_get_products(self):
""" Test the get_products function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_products(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_products(self.session)
self.assertEqual(len(results), 2)
self.assertEqual(
results[0].name, 'EPEL')
self.assertEqual(
results[1].name, 'Fedora')
def test_get_repo_prefix_arch(self):
""" Test the get_repo_prefix_arch function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_repo_prefix_arch(
self.session, 'updates-testing-f20', 'x86_64')
self.assertEqual(results, None)
tests.create_base_items(self.session)
tests.create_version(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_repository(self.session)
results = mirrormanager2.lib.get_repo_prefix_arch(
self.session, 'updates-testing-f20', 'x86_64')
self.assertEqual(
results.name, 'pub/fedora/linux/updates/testing/20/x86_64')
results = mirrormanager2.lib.get_repo_prefix_arch(
self.session, 'updates-testing-f21', 'x86_64')
self.assertEqual(
results.name, 'pub/fedora/linux/updates/testing/21/x86_64')
results = mirrormanager2.lib.get_repo_prefix_arch(
self.session, 'updates-testing-f20', 'i386')
self.assertEqual(results, None)
def test_get_repo_by_name(self):
""" Test the get_repo_by_name function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_repo_by_name(
self.session, 'pub/fedora/linux/updates/testing/19/x86_64')
self.assertEqual(results, None)
tests.create_base_items(self.session)
tests.create_version(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_repository(self.session)
results = mirrormanager2.lib.get_repo_by_name(
self.session, 'pub/fedora/linux/updates/testing/19/x86_64')
self.assertEqual(
results.name, 'pub/fedora/linux/updates/testing/19/x86_64')
results = mirrormanager2.lib.get_repo_by_name(
self.session, 'pub/fedora/linux/updates/testing/20/x86_64')
self.assertEqual(
results.name, 'pub/fedora/linux/updates/testing/20/x86_64')
results = mirrormanager2.lib.get_repo_by_name(
self.session, 'pub/fedora/linux/updates/testing/19/i386')
self.assertEqual(results, None)
def test_get_repo_by_dir(self):
""" Test the get_repo_by_dir function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_repo_by_dir(
self.session, 'pub/fedora/linux/updates/testing/21/x86_64')
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_version(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_repository(self.session)
results = mirrormanager2.lib.get_repo_by_dir(
self.session, 'pub/fedora/linux/updates/testing/21/x86_64')
self.assertEqual(len(results), 1)
self.assertEqual(
results[0].name, 'pub/fedora/linux/updates/testing/21/x86_64')
self.assertEqual(results[0].arch.name, 'x86_64')
def test_get_repositories(self):
""" Test the get_repositories function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_repositories(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_version(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_repository(self.session)
results = mirrormanager2.lib.get_repositories(self.session)
self.assertEqual(len(results), 3)
self.assertEqual(
results[0].name, 'pub/fedora/linux/updates/testing/19/x86_64')
self.assertEqual(results[0].arch.name, 'x86_64')
self.assertEqual(
results[1].name, 'pub/fedora/linux/updates/testing/20/x86_64')
self.assertEqual(results[1].arch.name, 'x86_64')
self.assertEqual(
results[2].name, 'pub/fedora/linux/updates/testing/21/x86_64')
self.assertEqual(results[2].arch.name, 'x86_64')
def test_get_reporedirect(self):
""" Test the get_reporedirect function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_reporedirect(self.session)
self.assertEqual(results, [])
tests.create_repositoryredirect(self.session)
results = mirrormanager2.lib.get_reporedirect(self.session)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].from_repo, 'fedora-rawhide')
self.assertEqual(results[0].to_repo, 'rawhide')
self.assertEqual(results[1].from_repo, 'fedora-install-rawhide')
self.assertEqual(results[1].to_repo, 'rawhide')
self.assertEqual(results[2].from_repo, 'epel-6.0')
self.assertEqual(results[2].to_repo, 'epel-6')
def test_get_arches(self):
""" Test the get_arches function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_arches(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
results = mirrormanager2.lib.get_arches(self.session)
self.assertEqual(len(results), 4)
self.assertEqual(results[0].name, 'i386')
self.assertEqual(results[1].name, 'ppc')
self.assertEqual(results[2].name, 'source')
self.assertEqual(results[3].name, 'x86_64')
def test_add_admin_to_site(self):
""" Test the add_admin_to_site function of mirrormanager2.lib.
"""
tests.create_base_items(self.session)
tests.create_site(self.session)
site = mirrormanager2.lib.get_site(self.session, 1)
results = mirrormanager2.lib.add_admin_to_site(
self.session, site, 'pingou')
self.assertEqual(results, 'pingou added as an admin')
results = mirrormanager2.lib.add_admin_to_site(
self.session, site, 'pingou')
self.assertEqual(results, 'pingou was already listed as an admin')
def test_get_locations(self):
""" Test the get_locations function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_locations(self.session)
self.assertEqual(results, [])
tests.create_location(self.session)
results = mirrormanager2.lib.get_locations(self.session)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'foo')
self.assertEqual(results[1].name, 'bar')
self.assertEqual(results[2].name, 'foobar')
def test_get_netblock_country(self):
""" Test the get_netblock_country function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_netblock_country(self.session)
self.assertEqual(results, [])
tests.create_netblockcountry(self.session)
results = mirrormanager2.lib.get_netblock_country(self.session)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].netblock, '127.0.0.0/24')
self.assertEqual(results[0].country, 'AU')
def test_get_mirrors(self):
""" Test the get_mirrors function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_mirrors(self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_hostcategory(self.session)
tests.create_hostcategoryurl(self.session)
tests.create_categorydirectory(self.session)
tests.create_netblockcountry(self.session)
results = mirrormanager2.lib.get_mirrors(self.session)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(self.session, private=True)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].name, 'private.localhost')
results = mirrormanager2.lib.get_mirrors(self.session, internet2=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, internet2_clients=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, internet2_clients=False)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, asn_clients=True)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].name, 'mirror2.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, asn_clients=False)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].name, 'private.localhost')
self.assertEqual(results[1].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, admin_active=False)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, admin_active=True)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, user_active=False)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, user_active=True)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, host_category_url_private=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, host_category_url_private=False)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, last_crawl_duration=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, last_crawl_duration=False)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, last_crawled=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, last_crawled=False)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, last_checked_in=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, last_checked_in=False)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, site_private=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, site_private=False)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, site_user_active=False)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, site_user_active=True)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, site_admin_active=False)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, site_admin_active=True)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'private.localhost')
self.assertEqual(results[2].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, up2date=True)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, up2date=False)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, version_id=1)
self.assertEqual(len(results), 0)
        results = mirrormanager2.lib.get_mirrors(
            self.session, version_id=3)
        self.assertEqual(len(results), 0)
tests.create_version(self.session)
tests.create_repository(self.session)
results = mirrormanager2.lib.get_mirrors(
self.session, version_id=1)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, version_id=3)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'mirror.localhost')
results = mirrormanager2.lib.get_mirrors(
self.session, arch_id=1)
self.assertEqual(len(results), 0)
results = mirrormanager2.lib.get_mirrors(
self.session, arch_id=3)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].name, 'mirror2.localhost')
self.assertEqual(results[1].name, 'mirror.localhost')
def test_get_user_sites(self):
""" Test the get_user_sites function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_user_sites(self.session, 'pingou')
self.assertEqual(results, [])
self.test_add_admin_to_site()
results = mirrormanager2.lib.get_user_sites(self.session, 'pingou')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].name, 'test-mirror')
def test_id_generator(self):
""" Test the id_generator function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.id_generator(size=5, chars=['a'])
self.assertEqual(results, 'aaaaa')
results = mirrormanager2.lib.id_generator(size=5, chars=['1'])
self.assertEqual(results, '11111')
def test_get_directory_by_name(self):
""" Test the get_directory_by_name function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_directory_by_name(
self.session, 'pub/epel')
self.assertEqual(results, None)
tests.create_directory(self.session)
results = mirrormanager2.lib.get_directory_by_name(
self.session, 'pub/epel')
self.assertEqual(results.name, 'pub/epel')
self.assertEqual(results.readable, True)
results = mirrormanager2.lib.get_directory_by_name(
self.session, 'pub/fedora/linux/extras')
self.assertEqual(results.name, 'pub/fedora/linux/extras')
self.assertEqual(results.readable, True)
results = mirrormanager2.lib.get_directory_by_name(
self.session, 'pub/fedora/linux/updates/testing/19/x86_64')
self.assertEqual(
results.name, 'pub/fedora/linux/updates/testing/19/x86_64')
self.assertEqual(results.readable, True)
def test_get_directories(self):
""" Test the get_directories function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_directories(self.session)
self.assertEqual(results, [])
tests.create_directory(self.session)
results = mirrormanager2.lib.get_directories(self.session)
self.assertEqual(len(results), 9)
self.assertEqual(results[0].name, 'pub/fedora/linux/releases')
self.assertEqual(results[1].name, 'pub/fedora/linux/extras')
self.assertEqual(results[2].name, 'pub/epel')
def test_get_directory_by_id(self):
""" Test the get_directory_by_id function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_directory_by_id(
self.session, 1)
self.assertEqual(results, None)
tests.create_directory(self.session)
results = mirrormanager2.lib.get_directory_by_id(self.session, 3)
self.assertEqual(results.name, 'pub/epel')
self.assertEqual(results.readable, True)
results = mirrormanager2.lib.get_directory_by_id(self.session, 2)
self.assertEqual(results.name, 'pub/fedora/linux/extras')
self.assertEqual(results.readable, True)
results = mirrormanager2.lib.get_directory_by_id(self.session, 4)
self.assertEqual(
results.name, 'pub/fedora/linux/releases/20')
self.assertEqual(results.readable, True)
def test_get_hostcategorydir_by_hostcategoryid_and_path(self):
""" Test the get_hostcategorydir_by_hostcategoryid_and_path
function of mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_hostcategorydir_by_hostcategoryid_and_path(
self.session, 2, 'pub/fedora/linux/releases/21')
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_category(self.session)
tests.create_hostcategory(self.session)
tests.create_hostcategorydir(self.session)
results = mirrormanager2.lib.get_hostcategorydir_by_hostcategoryid_and_path(
self.session, 3, 'pub/fedora/linux/releases/21')
self.assertEqual(len(results), 1)
self.assertEqual(
results[0].directory.name, 'pub/fedora/linux/releases/21')
self.assertEqual(
results[0].host_category.category.name, 'Fedora Linux')
def test_get_directory_exclusive_host(self):
""" Test the get_directory_exclusive_host function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_directory_exclusive_host(
self.session)
self.assertEqual(results, [])
tests.create_base_items(self.session)
tests.create_site(self.session)
tests.create_hosts(self.session)
tests.create_directory(self.session)
tests.create_directoryexclusivehost(self.session)
results = mirrormanager2.lib.get_directory_exclusive_host(
self.session)
self.assertEqual(len(results), 2)
self.assertEqual(
results[0].dname, 'pub/fedora/linux/releases/20')
self.assertEqual(
results[0].host_id, 1)
self.assertEqual(
results[1].dname, 'pub/fedora/linux/releases/21')
self.assertEqual(
results[1].host_id, 3)
def test_get_file_detail(self):
""" Test the get_file_detail function of
mirrormanager2.lib.
"""
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7)
self.assertEqual(results, None)
tests.create_directory(self.session)
tests.create_filedetail(self.session)
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7)
self.assertEqual(results.md5, 'foo_md5')
self.assertEqual(
results.directory.name,
'pub/fedora/linux/updates/testing/19/x86_64')
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7, md5='foo_md5')
self.assertEqual(results.md5, 'foo_md5')
self.assertEqual(
results.directory.name,
'pub/fedora/linux/updates/testing/19/x86_64')
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7, sha1='foo_sha1')
self.assertEqual(results.md5, 'foo_md5')
self.assertEqual(
results.directory.name,
'pub/fedora/linux/updates/testing/19/x86_64')
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7, sha256='foo_sha256')
self.assertEqual(results.md5, 'foo_md5')
self.assertEqual(
results.directory.name,
'pub/fedora/linux/updates/testing/19/x86_64')
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7, sha512='foo_sha512')
self.assertEqual(results.md5, 'foo_md5')
self.assertEqual(
results.directory.name,
'pub/fedora/linux/updates/testing/19/x86_64')
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7, size=2973)
self.assertEqual(results, None)
results = mirrormanager2.lib.get_file_detail(
self.session, 'repomd.xml', 7, timestamp=1357758825)
self.assertEqual(results.md5, 'foo_md5')
self.assertEqual(
results.directory.name,
'pub/fedora/linux/updates/testing/19/x86_64')
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(MMLibtests)
unittest.TextTestRunner(verbosity=10).run(SUITE)
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from blinkpy.common.memoized import memoized
def add_typ_dir_to_sys_path():
path_to_typ = get_typ_dir()
if path_to_typ not in sys.path:
sys.path.insert(0, path_to_typ)
def add_bindings_scripts_dir_to_sys_path():
path_to_bindings_scripts = get_bindings_scripts_dir()
if path_to_bindings_scripts not in sys.path:
sys.path.insert(0, path_to_bindings_scripts)
def add_build_scripts_dir_to_sys_path():
path_to_build_scripts = get_build_scripts_dir()
if path_to_build_scripts not in sys.path:
sys.path.insert(0, path_to_build_scripts)
def add_blinkpy_thirdparty_dir_to_sys_path():
path = get_blinkpy_thirdparty_dir()
if path not in sys.path:
sys.path.insert(0, path)
def add_depot_tools_dir_to_os_path():
path = get_depot_tools_dir()
if path not in os.environ['PATH']:
os.environ['PATH'] += os.pathsep + path
def get_bindings_scripts_dir():
return os.path.join(get_source_dir(), 'bindings', 'scripts')
def get_blink_dir():
return os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
def get_chromium_src_dir():
return os.path.dirname(os.path.dirname(get_blink_dir()))
def get_depot_tools_dir():
return os.path.join(get_chromium_src_dir(), 'third_party', 'depot_tools')
def get_source_dir():
return os.path.join(get_chromium_src_dir(), 'third_party', 'blink',
'renderer')
def get_typ_dir():
return os.path.join(get_chromium_src_dir(), 'third_party', 'catapult',
'third_party', 'typ')
def get_blinkpy_thirdparty_dir():
return os.path.join(get_blink_tools_dir(), 'blinkpy', 'third_party')
def get_blink_tools_dir():
return os.path.join(get_chromium_src_dir(), 'third_party', 'blink',
'tools')
def get_wpt_tools_wpt_dir():
return os.path.join(get_chromium_src_dir(), 'third_party', 'wpt_tools',
'wpt')
def get_build_scripts_dir():
return os.path.join(get_source_dir(), 'build', 'scripts')
def add_blink_tools_dir_to_sys_path():
path = get_blink_tools_dir()
if path not in sys.path:
sys.path.insert(0, path)
def _does_blink_web_tests_exist():
return os.path.exists(
os.path.join(get_chromium_src_dir(), 'third_party', 'blink',
'web_tests'))
TESTS_IN_BLINK = _does_blink_web_tests_exist()
# web_tests path relative to the repository root.
# Path separators are always '/', and this contains the trailing '/'.
RELATIVE_WEB_TESTS = 'third_party/blink/web_tests/'
WEB_TESTS_LAST_COMPONENT = 'web_tests'
class PathFinder(object):
def __init__(self, filesystem, sys_path=None, env_path=None):
self._filesystem = filesystem
self._dirsep = filesystem.sep
self._sys_path = sys_path or sys.path
self._env_path = env_path or os.environ['PATH'].split(os.pathsep)
@memoized
def chromium_base(self):
return self._filesystem.dirname(
self._filesystem.dirname(self._blink_base()))
def web_tests_dir(self):
return self.path_from_chromium_base('third_party', 'blink',
'web_tests')
def perf_tests_dir(self):
return self.path_from_chromium_base('third_party', 'blink',
'perf_tests')
def webdriver_prefix(self):
return self._filesystem.join('external', 'wpt', 'webdriver', '')
@memoized
def _blink_base(self):
"""Returns the absolute path to the top of the Blink directory."""
module_path = self._filesystem.path_to_module(self.__module__)
tools_index = module_path.rfind('tools')
assert tools_index != -1, 'could not find location of this checkout from %s' % module_path
return self._filesystem.normpath(module_path[0:tools_index - 1])
def path_from_chromium_base(self, *comps):
return self._filesystem.join(self.chromium_base(), *comps)
def _blink_source_dir(self):
return self._filesystem.join(self.chromium_base(), 'third_party',
'blink', 'renderer')
def path_from_blink_source(self, *comps):
return self._filesystem.join(self._blink_source_dir(), *comps)
def path_from_blink_tools(self, *comps):
return self._filesystem.join(
self._filesystem.join(self.chromium_base(), 'third_party', 'blink',
'tools'), *comps)
def path_from_web_tests(self, *comps):
return self._filesystem.join(self.web_tests_dir(), *comps)
def strip_web_tests_path(self, wpt_test_abs_path):
web_tests_path = self.path_from_web_tests('')
if wpt_test_abs_path.startswith(web_tests_path):
return wpt_test_abs_path[len(web_tests_path):]
return wpt_test_abs_path
def strip_webdriver_tests_path(self, wpt_webdriver_test_path):
if self.is_webdriver_test_path(wpt_webdriver_test_path):
return wpt_webdriver_test_path[len(self.webdriver_prefix()):]
return wpt_webdriver_test_path
def is_webdriver_test_path(self, test_path):
return test_path.startswith(self.webdriver_prefix())
@memoized
def depot_tools_base(self):
"""Returns the path to depot_tools, or None if not found.
Expects depot_tools to be //third_party/depot_tools.
src.git's DEPS defines depot_tools to be there.
"""
depot_tools = self.path_from_chromium_base('third_party',
'depot_tools')
return depot_tools if self._filesystem.isdir(depot_tools) else None
def path_from_depot_tools_base(self, *comps):
return self._filesystem.join(self.depot_tools_base(), *comps)
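# Rough usage sketch (not part of the original module): PathFinder only needs
# an object exposing the filesystem interface used above (sep, join, dirname,
# normpath, isdir, path_to_module), e.g. blinkpy's FileSystem:
#
#   from blinkpy.common.system.filesystem import FileSystem
#   finder = PathFinder(FileSystem())
#   finder.web_tests_dir()                 # .../third_party/blink/web_tests
#   finder.path_from_web_tests('external', 'wpt')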
|
|
"""
Test the func_inspect module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import functools
from joblib.func_inspect import filter_args, get_func_name, get_func_code
from joblib.func_inspect import _clean_win_chars, format_signature
from joblib.memory import Memory
from joblib.test.common import with_numpy
from joblib.testing import fixture, parametrize, raises
from joblib._compat import PY3_OR_LATER
###############################################################################
# Module-level functions and fixture, for tests
def f(x, y=0):
pass
def g(x):
pass
def h(x, y=0, *args, **kwargs):
pass
def i(x=1):
pass
def j(x, y, **kwargs):
pass
def k(*args, **kwargs):
pass
@fixture(scope='module')
def cached_func(tmpdir_factory):
# Create a Memory object to test decorated functions.
# We should be careful not to call the decorated functions, so that
# cache directories are not created in the temp dir.
cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect")
mem = Memory(cachedir.strpath)
@mem.cache
def cached_func_inner(x):
return x
return cached_func_inner
class Klass(object):
def f(self, x):
return x
###############################################################################
# Tests
@parametrize('func,args,filtered_args',
[(f, [[], (1, )], {'x': 1, 'y': 0}),
(f, [['x'], (1, )], {'y': 0}),
(f, [['y'], (0, )], {'x': 0}),
(f, [['y'], (0, ), {'y': 1}], {'x': 0}),
(f, [['x', 'y'], (0, )], {}),
(f, [[], (0,), {'y': 1}], {'x': 0, 'y': 1}),
(f, [['y'], (), {'x': 2, 'y': 1}], {'x': 2}),
(g, [[], (), {'x': 1}], {'x': 1}),
(i, [[], (2, )], {'x': 2})])
def test_filter_args(func, args, filtered_args):
assert filter_args(func, *args) == filtered_args
def test_filter_args_method():
obj = Klass()
assert filter_args(obj.f, [], (1, )) == {'x': 1, 'self': obj}
@parametrize('func,args,filtered_args',
[(h, [[], (1, )],
{'x': 1, 'y': 0, '*': [], '**': {}}),
(h, [[], (1, 2, 3, 4)],
{'x': 1, 'y': 2, '*': [3, 4], '**': {}}),
(h, [[], (1, 25), {'ee': 2}],
{'x': 1, 'y': 25, '*': [], '**': {'ee': 2}}),
(h, [['*'], (1, 2, 25), {'ee': 2}],
{'x': 1, 'y': 2, '**': {'ee': 2}})])
def test_filter_varargs(func, args, filtered_args):
assert filter_args(func, *args) == filtered_args
test_filter_kwargs_extra_params = []
if PY3_OR_LATER:
m1 = m2 = None
    # The following statements raise SyntaxError in python 2
    # because keyword-only arguments are not supported
exec("def m1(x, *, y): pass")
exec("def m2(x, *, y, z=3): pass")
test_filter_kwargs_extra_params.extend([
(m1, [[], (1,), {'y': 2}], {'x': 1, 'y': 2}),
(m2, [[], (1,), {'y': 2}], {'x': 1, 'y': 2, 'z': 3})
])
@parametrize('func,args,filtered_args',
[(k, [[], (1, 2), {'ee': 2}],
{'*': [1, 2], '**': {'ee': 2}}),
(k, [[], (3, 4)],
{'*': [3, 4], '**': {}})] +
test_filter_kwargs_extra_params)
def test_filter_kwargs(func, args, filtered_args):
assert filter_args(func, *args) == filtered_args
def test_filter_args_2():
assert (filter_args(j, [], (1, 2), {'ee': 2}) ==
{'x': 1, 'y': 2, '**': {'ee': 2}})
ff = functools.partial(f, 1)
# filter_args has to special-case partial
assert filter_args(ff, [], (1, )) == {'*': [1], '**': {}}
assert filter_args(ff, ['y'], (1, )) == {'*': [1], '**': {}}
@parametrize('func,funcname', [(f, 'f'), (g, 'g'),
(cached_func, 'cached_func')])
def test_func_name(func, funcname):
# Check that we are not confused by decoration
# here testcase 'cached_func' is the function itself
assert get_func_name(func)[1] == funcname
def test_func_name_on_inner_func(cached_func):
# Check that we are not confused by decoration
# here testcase 'cached_func' is the 'cached_func_inner' function
# returned by 'cached_func' fixture
assert get_func_name(cached_func)[1] == 'cached_func_inner'
def test_func_inspect_errors():
# Check that func_inspect is robust and will work on weird objects
assert get_func_name('a'.lower)[-1] == 'lower'
assert get_func_code('a'.lower)[1:] == (None, -1)
ff = lambda x: x
assert get_func_name(ff, win_characters=False)[-1] == '<lambda>'
assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py')
# Simulate a function defined in __main__
ff.__module__ = '__main__'
assert get_func_name(ff, win_characters=False)[-1] == '<lambda>'
assert get_func_code(ff)[1] == __file__.replace('.pyc', '.py')
if PY3_OR_LATER:
# Avoid flake8 F821 "undefined name" warning. func_with_kwonly_args and
# func_with_signature are redefined in the exec statement a few lines below
def func_with_kwonly_args():
pass
def func_with_signature():
pass
# exec is needed to define a function with a keyword-only argument and a
# function with signature while avoiding a SyntaxError on Python 2
exec("""
def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): pass
def func_with_signature(a: int, b: int) -> None: pass
""")
def test_filter_args_python_3():
assert (
filter_args(func_with_kwonly_args, [], (1, 2),
{'kw1': 3, 'kw2': 4}) ==
{'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4})
        # Keyword-only arguments cannot be passed positionally: filter_args is
        # expected to raise an informative ValueError when 'kw1' ends up in *args
with raises(ValueError) as excinfo:
filter_args(func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2})
excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
"parameter")
assert (
filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2),
{'kw1': 3, 'kw2': 4}) ==
{'a': 1, 'kw1': 3})
assert (filter_args(func_with_signature, ['b'], (1, 2)) == {'a': 1})
def test_bound_methods():
""" Make sure that calling the same method on two different instances
    of the same class does resolve to different signatures.
"""
a = Klass()
b = Klass()
assert filter_args(a.f, [], (1, )) != filter_args(b.f, [], (1, ))
@parametrize('exception,regex,func,args',
[(ValueError, 'ignore_lst must be a list of parameters to ignore',
f, ['bar', (None, )]),
(ValueError, r'Ignore list: argument \'(.*)\' is not defined',
g, [['bar'], (None, )]),
(ValueError, 'Wrong number of arguments',
h, [[]])])
def test_filter_args_error_msg(exception, regex, func, args):
""" Make sure that filter_args returns decent error messages, for the
sake of the user.
"""
with raises(exception) as excinfo:
filter_args(func, *args)
excinfo.match(regex)
def test_filter_args_no_kwargs_mutation():
"""None-regression test against 0.12.0 changes.
https://github.com/joblib/joblib/pull/75
Make sure filter args doesn't mutate the kwargs dict that gets passed in.
"""
kwargs = {'x': 0}
filter_args(g, [], [], kwargs)
assert kwargs == {'x': 0}
def test_clean_win_chars():
string = r'C:\foo\bar\main.py'
mangled_string = _clean_win_chars(string)
for char in ('\\', ':', '<', '>', '!'):
assert char not in mangled_string
@parametrize('func,args,kwargs,sgn_expected',
[(g, [list(range(5))], {}, 'g([0, 1, 2, 3, 4])'),
(k, [1, 2, (3, 4)], {'y': True}, 'k(1, 2, (3, 4), y=True)')])
def test_format_signature(func, args, kwargs, sgn_expected):
# Test signature formatting.
path, sgn_result = format_signature(func, *args, **kwargs)
assert sgn_result == sgn_expected
def test_format_signature_long_arguments():
shortening_threshold = 1500
# shortening gets it down to 700 characters but there is the name
# of the function in the signature and a few additional things
# like dots for the ellipsis
shortening_target = 700 + 10
arg = 'a' * shortening_threshold
_, signature = format_signature(h, arg)
assert len(signature) < shortening_target
nb_args = 5
args = [arg for _ in range(nb_args)]
_, signature = format_signature(h, *args)
assert len(signature) < shortening_target * nb_args
kwargs = {str(i): arg for i, arg in enumerate(args)}
_, signature = format_signature(h, **kwargs)
assert len(signature) < shortening_target * nb_args
_, signature = format_signature(h, *args, **kwargs)
assert len(signature) < shortening_target * 2 * nb_args
@with_numpy
def test_format_signature_numpy():
""" Test the format signature formatting with numpy.
"""
def test_special_source_encoding():
from joblib.test.test_func_inspect_special_encoding import big5_f
func_code, source_file, first_line = get_func_code(big5_f)
assert first_line == 5
assert "def big5_f():" in func_code
assert "test_func_inspect_special_encoding" in source_file
def _get_code():
from joblib.test.test_func_inspect_special_encoding import big5_f
return get_func_code(big5_f)[0]
def test_func_code_consistency():
from joblib.parallel import Parallel, delayed
codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5))
assert len(set(codes)) == 1
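# Quick illustration of the filter_args contract exercised above (a sketch,
# not an additional test): positional and keyword arguments are mapped onto
# parameter names, and anything listed in ignore_lst is dropped.
#
#   filter_args(f, [], (1,))        # -> {'x': 1, 'y': 0}
#   filter_args(f, ['y'], (1,))     # -> {'x': 1}
#   filter_args(h, [], (1, 2, 3))   # -> {'x': 1, 'y': 2, '*': [3], '**': {}}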
|
|
import json
from django import forms
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from crits.core import form_consts
from crits.core.crits_mongoengine import EmbeddedCampaign
from crits.core.data_tools import json_handler, make_ascii_strings
from crits.core.data_tools import make_unicode_strings, make_hex, xor_search
from crits.core.data_tools import xor_string, make_stackstrings
from crits.core.exceptions import ZipFileError
from crits.core.handsontable_tools import form_to_dict
from crits.core.user_tools import user_can_view_data, user_is_admin
from crits.core.user_tools import get_user_organization
from crits.objects.forms import AddObjectForm
from crits.samples.forms import UploadFileForm, XORSearchForm
from crits.samples.forms import UnrarSampleForm
from crits.samples.handlers import handle_uploaded_file, mail_sample
from crits.samples.handlers import handle_unrar_sample, generate_yarahit_jtable
from crits.samples.handlers import delete_sample, handle_unzip_file
from crits.samples.handlers import get_source_counts
from crits.samples.handlers import get_sample_details
from crits.samples.handlers import generate_sample_jtable
from crits.samples.handlers import generate_sample_csv, process_bulk_add_md5_sample
from crits.samples.handlers import update_sample_filename, modify_sample_filenames
from crits.samples.sample import Sample
from crits.stats.handlers import generate_sources
@user_passes_test(user_can_view_data)
def detail(request, sample_md5):
"""
Generate the sample details page.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param sample_md5: The MD5 of the Sample.
:type sample_md5: str
:returns: :class:`django.http.HttpResponse`
"""
format_ = request.GET.get('format', None)
template = "samples_detail.html"
(new_template, args) = get_sample_details(sample_md5,
request.user.username,
format_)
if new_template:
template = new_template
if template == "yaml":
return HttpResponse(args, mimetype="text/plain")
elif template == "json":
return HttpResponse(json.dumps(args), mimetype="application/json")
return render_to_response(template,
args,
RequestContext(request))
@user_passes_test(user_can_view_data)
def samples_listing(request,option=None):
"""
Generate Samples Listing template.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param option: Whether or not we should generate a CSV (yes if option is "csv")
:type option: str
:returns: :class:`django.http.HttpResponse`
"""
if option == "csv":
return generate_sample_csv(request)
return generate_sample_jtable(request, option)
@user_passes_test(user_can_view_data)
def yarahits_listing(request,option=None):
"""
Generate YaraHits Listing template.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param option: Whether or not we should generate a CSV (yes if option is "csv")
:type option: str
:returns: :class:`django.http.HttpResponse`
"""
return generate_yarahit_jtable(request, option)
@user_passes_test(user_can_view_data)
def view_upload_list(request, filename, md5s):
"""
View a list of uploaded files.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param filename: The name of the original file that was uploaded.
:type filename: str
:param md5s: The MD5s of the files that were uploaded.
:type md5s: str
:returns: :class:`django.http.HttpResponse`
"""
    # The md5s argument arrives as the string representation of a list; strip
    # any trailing slashes and parse it back into a list with ast.literal_eval.
while md5s.endswith('/'):
md5s = md5s[:-1]
import ast
md5s = ast.literal_eval(md5s)
return render_to_response('samples_uploadList.html',
{'sample_md5': md5s,
'archivename': filename},
RequestContext(request))
@user_passes_test(user_can_view_data)
def bulk_add_md5_sample(request):
"""
Bulk add samples via a bulk upload form.
Args:
request: The Django context which contains information about the
session and key/value pairs for the bulk add request
Returns:
        If the request is not a POST and not an Ajax call then:
            Returns a rendered HTML form for a bulk add of samples
        If the request is a POST and an Ajax call then:
            Returns a response that contains information about the
            status of the bulk add. This may include information
            such as items that failed or were successfully added. This may
            also contain helpful status messages about each operation.
"""
formdict = form_to_dict(UploadFileForm(request.user, request.POST, request.FILES))
objectformdict = form_to_dict(AddObjectForm(request.user))
if request.method == "POST" and request.is_ajax():
        response = process_bulk_add_md5_sample(request, formdict)
return HttpResponse(json.dumps(response,
default=json_handler),
mimetype='application/json')
else:
return render_to_response('bulk_add_default.html',
{'formdict': formdict,
'objectformdict': objectformdict,
'title': "Bulk Add Samples",
'table_name': 'sample',
'local_validate_columns': [form_consts.Sample.MD5],
'is_bulk_add_objects': True},
                                  RequestContext(request))
@user_passes_test(user_can_view_data)
def upload_file(request, related_md5=None):
"""
Upload a new sample.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param related_md5: The MD5 of a related sample.
:type related_md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
form = UploadFileForm(request.user, request.POST, request.FILES)
email_errmsg = None
if form.is_valid():
response = {'success': False,
'message': 'Unknown error; unable to upload file.'}
inherited_source = None
backdoor = form.cleaned_data['backdoor']
campaign = form.cleaned_data['campaign']
confidence = form.cleaned_data['confidence']
source = form.cleaned_data['source']
method = form.cleaned_data['method']
reference = form.cleaned_data['reference']
analyst = request.user.username
if related_md5:
reload_page = True
else:
reload_page = False
related_md5 = form.cleaned_data['related_md5']
if related_md5:
related_sample = Sample.objects(md5=related_md5).first()
if not related_sample:
response['message'] = "Upload Failed. Unable to locate related sample."
return render_to_response("file_upload_response.html",
{'response': json.dumps(response)},
RequestContext(request))
# If selected, new sample inherits the campaigns of the related sample.
if form.cleaned_data['inherit_campaigns']:
if campaign:
related_sample.campaign.append(EmbeddedCampaign(name=campaign, confidence=confidence, analyst=analyst))
campaign = related_sample.campaign
# If selected, new sample inherits the sources of the related sample
if form.cleaned_data['inherit_sources']:
inherited_source = related_sample.source
backdoor_name = None
backdoor_version = None
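            # The form's backdoor field packs "name|||version" into a single
            # value; split it back into its two components before handing off.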
if backdoor:
backdoor = backdoor.split('|||')
if len(backdoor) == 2:
(backdoor_name, backdoor_version) = backdoor[0], backdoor[1]
try:
if request.FILES:
result = handle_uploaded_file(
request.FILES['filedata'],
source,
method,
reference,
form.cleaned_data['file_format'],
form.cleaned_data['password'],
analyst,
campaign,
confidence,
related_md5,
bucket_list=form.cleaned_data[form_consts.Common.BUCKET_LIST_VARIABLE_NAME],
ticket=form.cleaned_data[form_consts.Common.TICKET_VARIABLE_NAME],
inherited_source=inherited_source,
backdoor_name=backdoor_name,
backdoor_version=backdoor_version)
else:
result = handle_uploaded_file(
None,
source,
method,
reference,
form.cleaned_data['file_format'],
None,
analyst,
campaign,
confidence,
related_md5 = related_md5,
filename=request.POST['filename'].strip(),
md5=request.POST['md5'].strip().lower(),
bucket_list=form.cleaned_data[form_consts.Common.BUCKET_LIST_VARIABLE_NAME],
ticket=form.cleaned_data[form_consts.Common.TICKET_VARIABLE_NAME],
inherited_source=inherited_source,
is_return_only_md5=False,
backdoor_name=backdoor_name,
backdoor_version=backdoor_version)
except ZipFileError, zfe:
return render_to_response('file_upload_response.html',
{'response': json.dumps({'success': False,
'message': zfe.value})},
RequestContext(request))
else:
if len(result) > 1:
filedata = request.FILES['filedata']
message = ('<a href="%s">View Uploaded Samples.</a>'
% reverse('crits.samples.views.view_upload_list',
args=[filedata.name, result]))
response = {'success': True,
'message': message }
md5_response = result
elif len(result) == 1:
md5_response = None
if not request.FILES:
response['success'] = result[0].get('success', False)
                        if not response['success']:
response['message'] = result[0].get('message', response.get('message'))
else:
md5_response = [result[0].get('object').md5]
else:
md5_response = [result[0]]
response['success'] = True
                    if md5_response is not None:
response['message'] = ('File uploaded successfully. <a href="%s">View Sample.</a>'
% reverse('crits.samples.views.detail',
args=md5_response))
if response['success']:
if request.POST.get('email'):
for s in md5_response:
email_errmsg = mail_sample(s, [request.user.email])
if email_errmsg is not None:
msg = "<br>Error emailing sample %s: %s\n" % (s, email_errmsg)
response['message'] = response['message'] + msg
if reload_page:
response['redirect_url'] = reverse('crits.samples.views.detail', args=[related_md5])
return render_to_response("file_upload_response.html",
{'response': json.dumps(response)},
RequestContext(request))
else:
if related_md5: #if this is a 'related' upload, hide field so it doesn't reappear
form.fields['related_md5'].widget = forms.HiddenInput()
return render_to_response('file_upload_response.html',
{'response': json.dumps({'success': False,
'form': form.as_table()})},
RequestContext(request))
else:
return HttpResponseRedirect(reverse('crits.samples.views.samples_listing'))
@user_passes_test(user_can_view_data)
def strings(request, sample_md5):
"""
Generate strings for a sample. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param sample_md5: The MD5 of the sample to use.
:type sample_md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.is_ajax():
strings_data = make_ascii_strings(md5=sample_md5)
strings_data += make_unicode_strings(md5=sample_md5)
result = {"strings": strings_data}
return HttpResponse(json.dumps(result),
mimetype='application/json')
else:
return render_to_response('error.html',
{'error': "Expected AJAX."},
RequestContext(request))
@user_passes_test(user_can_view_data)
def stackstrings(request, sample_md5):
"""
Generate stack strings for a sample. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param sample_md5: The MD5 of the sample to use.
:type sample_md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.is_ajax():
strings = make_stackstrings(md5=sample_md5)
result = {"strings": strings}
return HttpResponse(json.dumps(result),
mimetype='application/json')
else:
return render_to_response('error.html',
{'error': "Expected AJAX."},
RequestContext(request))
@user_passes_test(user_can_view_data)
def hex(request,sample_md5):
"""
Generate hex for a sample. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param sample_md5: The MD5 of the sample to use.
:type sample_md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.is_ajax():
hex_data = make_hex(md5=sample_md5)
result = {"strings": hex_data}
return HttpResponse(json.dumps(result),
mimetype='application/json')
else:
return render_to_response('error.html',
{'error': "Expected AJAX."},
RequestContext(request))
@user_passes_test(user_can_view_data)
def xor(request,sample_md5):
"""
Generate xor results for a sample. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param sample_md5: The MD5 of the sample to use.
:type sample_md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.is_ajax():
key = request.GET.get('key')
key = int(key)
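        # xor_string (crits.core.data_tools) applies the single-byte key to the
        # sample's binary data (conceptually, each output byte is byte ^ key);
        # the decoded buffer is then fed through the ASCII strings extractor.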
xor_data = xor_string(md5=sample_md5,
key=key)
xor_data = make_ascii_strings(data=xor_data)
result = {"strings": xor_data}
return HttpResponse(json.dumps(result),
mimetype='application/json')
else:
return render_to_response('error.html',
{'error': "Expected AJAX."},
RequestContext(request))
@user_passes_test(user_can_view_data)
def xor_searcher(request, sample_md5):
"""
Generate xor search results for a sample. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param sample_md5: The MD5 of the sample to use.
:type sample_md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
form = XORSearchForm(request.POST)
if form.is_valid():
try:
string = request.POST['string']
except:
string = None
try:
if request.POST["skip_nulls"] == "on":
skip_nulls = 1
except:
skip_nulls = 0
try:
if request.POST["is_key"] == "on":
is_key = 1
except:
is_key = 0
if is_key:
try:
result = {"keys": [int(string)]}
except:
result = {"keys": []}
else:
results = xor_search(md5=sample_md5,
string=string,
skip_nulls=skip_nulls)
result = {"keys": results}
return HttpResponse(json.dumps(result),
mimetype='application/json')
else:
return render_to_response('error.html',
{'error': "Invalid Form."},
RequestContext(request))
else:
return render_to_response('error.html',
{'error': "Expected AJAX POST."},
RequestContext(request))
@user_passes_test(user_can_view_data)
def unzip_sample(request, md5):
"""
Unzip a sample.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param md5: The MD5 of the sample to use.
:type md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST":
        # Intentionally using UnrarSampleForm here: unrar and unzip share the
        # same form since the fields they need are identical.
form = UnrarSampleForm(request.POST)
if form.is_valid():
pwd = form.cleaned_data['password']
try:
handle_unzip_file(md5, user=request.user.username, password=pwd)
except ZipFileError, zfe:
return render_to_response('error.html',
{'error' : zfe.value},
RequestContext(request))
return HttpResponseRedirect(reverse('crits.samples.views.detail',
args=[md5]))
else:
return render_to_response('error.html',
{'error': 'Expecting POST.'},
RequestContext(request))
@user_passes_test(user_can_view_data)
def unrar_sample(request, md5):
"""
Unrar a sample.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param md5: The MD5 of the sample to use.
:type md5: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST":
unrar_form = UnrarSampleForm(request.POST)
if unrar_form.is_valid():
pwd = unrar_form.cleaned_data['password']
try:
handle_unrar_sample(md5, user=request.user.username, password=pwd)
except ZipFileError, zfe:
return render_to_response('error.html',
{'error' : zfe.value},
RequestContext(request))
return HttpResponseRedirect(reverse('crits.samples.views.detail',
args=[md5]))
#TODO: convert to jtable
@user_passes_test(user_can_view_data)
def sources(request):
"""
Get the sources list for samples.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
refresh = request.GET.get("refresh", "no")
if refresh == "yes":
generate_sources()
sources_list = get_source_counts(request.user)
return render_to_response('samples_sources.html',
{'sources': sources_list},
RequestContext(request))
@user_passes_test(user_is_admin)
def remove_sample(request, md5):
"""
Remove a sample from CRITs.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param md5: The MD5 of the sample to remove.
:type md5: str
:returns: :class:`django.http.HttpResponse`
"""
result = delete_sample(md5, '%s' % request.user.username)
if result:
org = get_user_organization(request.user.username)
return HttpResponseRedirect(reverse('crits.samples.views.samples_listing')
+'?source=%s' % org)
else:
return render_to_response('error.html',
{'error': "Could not delete sample"},
RequestContext(request))
@user_passes_test(user_can_view_data)
def set_sample_filename(request):
"""
Set a Sample filename. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
filename = request.POST.get('filename', None)
id_ = request.POST.get('id', None)
analyst = request.user.username
return HttpResponse(json.dumps(update_sample_filename(id_,
filename,
analyst)),
mimetype="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def set_sample_filenames(request):
"""
Set Sample filenames. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
tags = request.POST.get('tags', "").split(",")
id_ = request.POST.get('id', None)
return HttpResponse(json.dumps(modify_sample_filenames(id_,
tags,
request.user.username)),
mimetype="application/json")
else:
error = "Expected POST"
return render_to_response("error.html", {"error" : error },
RequestContext(request))
|
|
r"""
.. autofunction:: openpnm.models.physics.generic_source_term.charge_conservation
.. autofunction:: openpnm.models.physics.generic_source_term.standard_kinetics
.. autofunction:: openpnm.models.physics.generic_source_term.linear
.. autofunction:: openpnm.models.physics.generic_source_term.power_law
.. autofunction:: openpnm.models.physics.generic_source_term.exponential
.. autofunction:: openpnm.models.physics.generic_source_term.natural_exponential
.. autofunction:: openpnm.models.physics.generic_source_term.logarithm
.. autofunction:: openpnm.models.physics.generic_source_term.natural_logarithm
.. autofunction:: openpnm.models.physics.generic_source_term.general_symbolic
"""
import numpy as _np
import scipy as _sp
import scipy.sparse.csgraph as _spgr
def charge_conservation(target, phase, p_alg, e_alg, assumption):
r"""
Applies the source term on the charge conservation equation when solving
for ions transport.
Parameters
----------
phase : OpenPNM Phase object
The phase on which the charge conservation equation is applied.
p_alg : OpenPNM Algorithm object
The algorithm used to enforce charge conservation.
e_alg : list of OpenPNM algorithms
The list of algorithms used to solve for transport of different
ionic species of the mixture phase.
assumption : string
        A string corresponding to the assumption adopted to enforce charge
conservation.
Returns
-------
A dictionary containing the following three items:
    **'rate'** - The value of the source term function for the given list
    of algorithms under the provided assumption.
    **'S1'** - A placeholder (zero array).
    **'S2'** - The value of the source term function for the given list of
    algorithms under the provided assumption (same as 'rate').
Notes
-----
    Three assumptions are supported: "poisson", "electroneutrality" and
"laplace".
"""
F = 96485.3321233100184
rhs = _np.zeros(shape=(p_alg.Np, ), dtype=float)
network = p_alg.project.network
if assumption == 'poisson':
v = network['pore.volume']
for e in e_alg:
rhs += (v * F * phase['pore.valence.'+e.settings['ion']]
* e[e.settings['quantity']])
elif assumption == 'poisson_2D':
s = network['pore.area']
for e in e_alg:
rhs += (s * F * phase['pore.valence.'+e.settings['ion']]
* e[e.settings['quantity']])
elif assumption in ['electroneutrality', 'electroneutrality_2D']:
for e in e_alg:
try:
c = e[e.settings['quantity']]
except KeyError:
c = _np.zeros(shape=(e.Np, ), dtype=float)
network = e.project.network
g = phase['throat.diffusive_conductance.'+e.settings['ion']]
am = network.create_adjacency_matrix(weights=g, fmt='coo')
A = _spgr.laplacian(am)
rhs += - F * phase['pore.valence.'+e.settings['ion']] * A * c
elif assumption in ['laplace', 'laplace_2D']:
pass # rhs should remain 0
else:
        raise Exception('Unknown keyword for "charge_conservation", can '
                        + 'only be "poisson", "poisson_2D", "laplace", '
                        + '"laplace_2D", "electroneutrality" or '
                        + '"electroneutrality_2D"')
S1 = _np.zeros(shape=(p_alg.Np, ), dtype=float)
values = {'S1': S1, 'S2': rhs, 'rate': rhs}
return values
def standard_kinetics(target, X, prefactor, exponent):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of `X`:
.. math::
r = A X^b
Parameters
----------
    ``prefactor, exponent`` : string
        The dictionary keys on the target object containing the coefficient
        values to be used in the source term model
    ``X`` : string
        The dictionary key on the target object containing the quantity
        of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follow:
.. math::
rate = S_{1} X + S_{2}
"""
X = target[X]
A = target[prefactor]
b = target[exponent]
r = A*(X**b)
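    # Linearize r = A*X**b about the current X: S1 is the slope dr/dX and
    # S2 = r - S1*X is the intercept, so that r ~= S1*X + S2 locally.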
S1 = A*b*(X**(b - 1))
S2 = A*(1 - b)*(X**b)
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
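# A minimal sketch (not called anywhere in this module) of how the values
# returned by ``standard_kinetics`` fit together: because S1 is the local
# slope and S2 the corresponding intercept, S1*X + S2 reproduces the rate at
# the linearization point.  A plain dict stands in for an OpenPNM object and
# all keys and numbers below are made up for illustration.
def _example_standard_kinetics_linearization():
    target = {'pore.concentration': _np.array([0.5, 1.0, 2.0]),
              'pore.k': _np.array([1e-3, 1e-3, 1e-3]),
              'pore.n': _np.array([2.0, 2.0, 2.0])}
    vals = standard_kinetics(target, X='pore.concentration',
                             prefactor='pore.k', exponent='pore.n')
    X = target['pore.concentration']
    assert _np.allclose(vals['rate'], vals['S1']*X + vals['S2'])
    return vals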
def _parse_args(target, key, default):
if key == '':
val = default
else:
val = target[key]
return val
def linear(target, X, A1='', A2=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of `X`:
.. math::
r = A_{1} X + A_{2}
Parameters
----------
A1 -> A2 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=0.0)
X = target[X]
r = A * X + B
S1 = A
S2 = B
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
def power_law(target, X, A1='', A2='', A3=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *X*:
.. math::
r = A_{1} x^{A_{2}} + A_{3}
Parameters
----------
A1 -> A3 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=1.0)
C = _parse_args(target=target, key=A3, default=0.0)
X = target[X]
r = A * X ** B + C
S1 = A * B * X ** (B - 1)
S2 = A * X ** B * (1 - B) + C
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
def exponential(target, X, A1='', A2='', A3='', A4='', A5='', A6=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of `X`:
.. math::
r = A_{1} A_{2}^{( A_{3} x^{ A_{4} } + A_{5})} + A_{6}
Parameters
----------
A1 -> A6 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=1.0)
C = _parse_args(target=target, key=A3, default=1.0)
D = _parse_args(target=target, key=A4, default=1.0)
E = _parse_args(target=target, key=A5, default=0.0)
F = _parse_args(target=target, key=A6, default=0.0)
X = target[X]
r = A * B ** (C * X ** D + E) + F
S1 = A * C * D * X ** (D - 1) * _np.log(B) * B ** (C * X ** D + E)
S2 = A * B ** (C * X ** D + E) * (1 - C * D * _np.log(B) * X ** D) + F
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
def natural_exponential(target, X, A1='', A2='', A3='', A4='', A5=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of `X`:
.. math::
r = A_{1} exp( A_{2} x^{ A_{3} } + A_{4} )+ A_{5}
Parameters
----------
A1 -> A5 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=0.0)
C = _parse_args(target=target, key=A3, default=0.0)
D = _parse_args(target=target, key=A4, default=0.0)
E = _parse_args(target=target, key=A5, default=0.0)
X = target[X]
r = A * _np.exp(B * X ** C + D) + E
S1 = A * B * C * X ** (C - 1) * _np.exp(B * X ** C + D)
S2 = A * (1 - B * C * X ** C) * _np.exp(B * X ** C + D) + E
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
def logarithm(target, X, A1='', A2='', A3='', A4='', A5='', A6=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of `X`:
.. math::
r = A_{1} Log_{ A_{2} }( A_{3} x^{ A_{4} }+ A_{5})+ A_{6}
Parameters
----------
A1 -> A6 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=10.0)
C = _parse_args(target=target, key=A3, default=1.0)
D = _parse_args(target=target, key=A4, default=1.0)
E = _parse_args(target=target, key=A5, default=0.0)
F = _parse_args(target=target, key=A6, default=0.0)
X = target[X]
r = (A * _np.log(C * X ** D + E)/_np.log(B) + F)
S1 = A * C * D * X ** (D - 1) / (_np.log(B) * (C * X ** D + E))
S2 = A * _np.log(C * X ** D + E) / _np.log(B) + F - A * C * D * X ** D / \
(_np.log(B) * (C * X ** D + E))
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
def natural_logarithm(target, X, A1='', A2='', A3='', A4='', A5=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of `X`:
.. math::
r = A_{1} Ln( A_{2} x^{ A_{3} }+ A_{4})+ A_{5}
Parameters
----------
A1 -> A5 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=1.0)
C = _parse_args(target=target, key=A3, default=1.0)
D = _parse_args(target=target, key=A4, default=0.0)
E = _parse_args(target=target, key=A5, default=0.0)
X = target[X]
r = A*_np.log(B*X**C + D) + E
S1 = A*B*C*X**(C - 1) / (B * X ** C + D)
S2 = A*_np.log(B*X**C + D) + E - A*B*C*X**C / (B*X**C + D)
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
def _build_func(eq, **args):
r'''
Take a symbolic equation and return the lambdified version plus the
linearization of form S1 * x + S2
'''
from sympy import lambdify
eq_prime = eq.diff(args['x'])
s1 = eq_prime
s2 = eq - eq_prime*args['x']
EQ = lambdify(args.values(), expr=eq, modules='numpy')
S1 = lambdify(args.values(), expr=s1, modules='numpy')
S2 = lambdify(args.values(), expr=s2, modules='numpy')
return EQ, S1, S2
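# A minimal sketch (not called anywhere in this module) of what ``_build_func``
# returns: EQ evaluates the expression itself, while S1 and S2 evaluate its
# derivative and tangent-line intercept, so S1*x + S2 equals EQ at the point
# of linearization.  The coefficients and x values below are illustrative.
def _example_build_func():
    from sympy import symbols
    a, b, c, x = symbols('a,b,c,x')
    y = a*x**b + c
    EQ, S1, S2 = _build_func(eq=y, a=a, b=b, c=c, x=x)
    A, B, C = 2.0, 3.0, 1.0
    X = _np.array([0.5, 1.0, 2.0])
    assert _np.allclose(EQ(A, B, C, X), S1(A, B, C, X)*X + S2(A, B, C, X))
    return EQ(A, B, C, X)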
def linear_sym(target, X, A1='', A2=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *x*:
.. math::
r = A_{1} x + A_{2}
Parameters
----------
A1 -> A2 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
from sympy import symbols
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=0.0)
X = target[X]
# Symbols used in symbolic function
a, b, x = symbols('a,b,x')
# Equation
y = a*x + b
# Callable functions
r, s1, s2 = _build_func(eq=y, a=a, b=b, x=x)
# Values
r_val = r(A, B, X)
s1_val = s1(A, B, X)
s2_val = s2(A, B, X)
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
def power_law_sym(target, X, A1='', A2='', A3=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *x*:
.. math::
r = A_{1} x^{A_{2}} + A_{3}
Parameters
----------
A1 -> A3 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
from sympy import symbols
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=1.0)
C = _parse_args(target=target, key=A3, default=0.0)
X = target[X]
# Symbols used in symbolic function
a, b, c, x = symbols('a,b,c,x')
# Equation
y = a*x**b + c
# Callable functions
r, s1, s2 = _build_func(eq=y, a=a, b=b, c=c, x=x)
# Values
r_val = r(A, B, C, X)
s1_val = s1(A, B, C, X)
s2_val = s2(A, B, C, X)
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
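# A minimal sketch (not called anywhere in this module) showing that the
# symbolic model above gives the same values as its hand-linearized
# counterpart ``power_law``.  A plain dict stands in for an OpenPNM object;
# the dictionary keys and numbers are made up for illustration.
def _example_power_law_vs_sym():
    target = {'pore.A1': _np.array([1.5]),
              'pore.A2': _np.array([2.0]),
              'pore.A3': _np.array([0.1]),
              'pore.x': _np.array([0.75])}
    direct = power_law(target, X='pore.x', A1='pore.A1', A2='pore.A2',
                       A3='pore.A3')
    symbolic = power_law_sym(target, X='pore.x', A1='pore.A1', A2='pore.A2',
                             A3='pore.A3')
    for key in ('rate', 'S1', 'S2'):
        assert _np.allclose(direct[key], symbolic[key])
    return direct, symbolic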
def exponential_sym(target, X, A1='', A2='', A3='', A4='', A5='', A6=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *x*:
.. math::
r = A_{1} A_{2}^{( A_{3} x^{ A_{4} } + A_{5})} + A_{6}
Parameters
----------
A1 -> A6 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
from sympy import symbols
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=1.0)
C = _parse_args(target=target, key=A3, default=1.0)
D = _parse_args(target=target, key=A4, default=1.0)
E = _parse_args(target=target, key=A5, default=0.0)
F = _parse_args(target=target, key=A6, default=0.0)
X = target[X]
# Symbols used in symbolic function
a, b, c, d, e, f, x = symbols('a,b,c,d,e,f,x')
# Equation
y = a*b**(c*x**d + e) + f
# Callable functions
r, s1, s2 = _build_func(eq=y, a=a, b=b, c=c, d=d, e=e, f=f, x=x)
# Values
r_val = r(A, B, C, D, E, F, X)
s1_val = s1(A, B, C, D, E, F, X)
s2_val = s2(A, B, C, D, E, F, X)
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
def natural_exponential_sym(target, X, A1='', A2='', A3='', A4='', A5=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *x*:
.. math::
r = A_{1} exp( A_{2} x^{ A_{3} } + A_{4} )+ A_{5}
Parameters
----------
A1 -> A5 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
from sympy import symbols, exp
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=1.0)
C = _parse_args(target=target, key=A3, default=1.0)
D = _parse_args(target=target, key=A4, default=0.0)
E = _parse_args(target=target, key=A5, default=0.0)
X = target[X]
# Symbols used in symbolic function
a, b, c, d, e, x = symbols('a,b,c,d,e,x')
# Equation
y = a*exp(b*x**c + d) + e
# Callable functions
r, s1, s2 = _build_func(eq=y, a=a, b=b, c=c, d=d, e=e, x=x)
# Values
r_val = r(A, B, C, D, E, X)
s1_val = s1(A, B, C, D, E, X)
s2_val = s2(A, B, C, D, E, X)
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
def logarithm_sym(target, X, A1='', A2='', A3='', A4='', A5='', A6=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *x*:
.. math::
r = A_{1} Log_{ A_{2} }( A_{3} x^{ A_{4} }+ A_{5})+ A_{6}
Parameters
----------
A1 -> A6 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
from sympy import symbols, log
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=10.0)
C = _parse_args(target=target, key=A3, default=1.0)
D = _parse_args(target=target, key=A4, default=1.0)
E = _parse_args(target=target, key=A5, default=0.0)
F = _parse_args(target=target, key=A6, default=0.0)
X = target[X]
# Symbols used in symbolic function
a, b, c, d, e, f, x = symbols('a,b,c,d,e,f,x')
# Equation
y = a * log((c * x**d + e), b) + f
# Callable functions
r, s1, s2 = _build_func(eq=y, a=a, b=b, c=c, d=d, e=e, f=f, x=x)
# Values
r_val = r(A, B, C, D, E, F, X)
s1_val = s1(A, B, C, D, E, F, X)
s2_val = s2(A, B, C, D, E, F, X)
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
def natural_logarithm_sym(target, X, A1='', A2='', A3='', A4='', A5=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *x*:
.. math::
rate = A_{1} Ln( A_{2} x^{ A_{3} }+ A_{4})+ A_{5}
Parameters
----------
A1 -> A5 : string
The dictionary keys on the target object containing the coefficient
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follows:
.. math::
rate = S_{1} X + S_{2}
"""
from sympy import symbols, ln
A = _parse_args(target=target, key=A1, default=0.0)
B = _parse_args(target=target, key=A2, default=1.0)
C = _parse_args(target=target, key=A3, default=1.0)
D = _parse_args(target=target, key=A4, default=0.0)
E = _parse_args(target=target, key=A5, default=0.0)
X = target[X]
# Symbols used in symbolic function
a, b, c, d, e, x = symbols('a,b,c,d,e,x')
# Equation
y = a * ln(b * x**c + d) + e
# Callable functions
r, s1, s2 = _build_func(eq=y, a=a, b=b, c=c, d=d, e=e, x=x)
# Values
r_val = r(A, B, C, D, E, X)
s1_val = s1(A, B, C, D, E, X)
s2_val = s2(A, B, C, D, E, X)
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
def general_symbolic(target, eqn=None, arg_map=None):
r'''
A general function to interpret a sympy equation and evaluate the linear
components of the source term.
Parameters
----------
target : OpenPNM object
The OpenPNM object where the result will be applied.
eqn : sympy symbolic expression for the source terms
e.g. y = a*x**b + c
arg_map : Dict mapping the symbols in the expression to OpenPNM data
on the target. Must contain 'x' which is the independent variable.
e.g. arg_map={'a':'pore.a', 'b':'pore.b', 'c':'pore.c', 'x':'pore.x'}
Examples
--------
>>> import openpnm as op
>>> from openpnm.models.physics import generic_source_term as gst
>>> import scipy as sp
>>> import sympy
>>> pn = op.network.Cubic(shape=[5, 5, 5], spacing=0.0001)
>>> water = op.phases.Water(network=pn)
>>> water['pore.a'] = 1
>>> water['pore.b'] = 2
>>> water['pore.c'] = 3
>>> water['pore.x'] = sp.random.random(water.Np)
>>> a, b, c, x = sympy.symbols('a,b,c,x')
>>> y = a*x**b + c
>>> arg_map = {'a':'pore.a', 'b':'pore.b', 'c':'pore.c', 'x':'pore.x'}
>>> water.add_model(propname='pore.general',
... model=gst.general_symbolic,
... eqn=y, arg_map=arg_map,
... regen_mode='normal')
>>> assert 'pore.general.rate' in water.props()
>>> assert 'pore.general.S1' in water.props()
>>> assert 'pore.general.S2' in water.props()
'''
from sympy import postorder_traversal, srepr, symbols
# First make sure all the symbols have been allocated dict items
for arg in postorder_traversal(eqn):
if srepr(arg)[:6] == 'Symbol':
key = srepr(arg)[7:].strip('(').strip(')').strip("'")
if key not in arg_map.keys():
raise Exception('argument mapping incomplete, missing '+key)
if 'x' not in arg_map.keys():
raise Exception('argument mapping must contain "x" for the '
+ 'independent variable')
# Get the data
data = {}
args = {}
for key in arg_map.keys():
data[key] = target[arg_map[key]]
# Callable functions
args[key] = symbols(key)
r, s1, s2 = _build_func(eqn, **args)
r_val = r(*data.values())
s1_val = s1(*data.values())
s2_val = s2(*data.values())
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
|
|
# coding: utf-8
from __future__ import unicode_literals, division
"""
Created on Jun 1, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jun 1, 2012"
import unittest
import os
import glob
import shutil
import datetime
from custodian.vasp.handlers import VaspErrorHandler, \
UnconvergedErrorHandler, MeshSymmetryErrorHandler, WalltimeHandler, \
MaxForceErrorHandler, PositiveEnergyErrorHandler, PotimErrorHandler, \
FrozenJobErrorHandler, AliasingErrorHandler, StdErrHandler
from pymatgen.io.vasp import Incar, Poscar, Structure, Kpoints, VaspInput
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
cwd = os.getcwd()
def clean_dir():
for f in glob.glob("error.*.tar.gz"):
os.remove(f)
for f in glob.glob("custodian.chk.*.tar.gz"):
os.remove(f)
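# The handlers exercised by these tests are normally driven by a Custodian
# runner rather than called directly; Custodian alternates check()/correct()
# around a VASP job until the run succeeds or max_errors is reached.  A rough
# sketch of that wiring (the command and settings are hypothetical and this
# snippet is not executed as part of the test suite):
#
#     from custodian.custodian import Custodian
#     from custodian.vasp.jobs import VaspJob
#
#     handlers = [VaspErrorHandler(), UnconvergedErrorHandler(),
#                 MeshSymmetryErrorHandler(), WalltimeHandler(wall_time=3600)]
#     jobs = [VaspJob(vasp_cmd=["mpirun", "vasp_std"])]
#     Custodian(handlers, jobs, max_errors=5).run()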
class VaspErrorHandlerTest(unittest.TestCase):
def setUp(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
os.chdir(test_dir)
shutil.copy("INCAR", "INCAR.orig")
shutil.copy("KPOINTS", "KPOINTS.orig")
shutil.copy("POSCAR", "POSCAR.orig")
shutil.copy("CHGCAR", "CHGCAR.orig")
def test_frozen_job(self):
h = FrozenJobErrorHandler()
d = h.correct()
self.assertEqual(d['errors'], ['Frozen job'])
self.assertEqual(Incar.from_file("INCAR")['ALGO'], "Normal")
def test_check_correct(self):
h = VaspErrorHandler("vasp.teterror")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['tet'])
self.assertEqual(d["actions"],
[{'action': {'_set': {'ISMEAR': 0}},
'dict': 'INCAR'}])
h = VaspErrorHandler("vasp.sgrcon")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['rot_matrix'])
self.assertEqual(set([a["dict"] for a in d["actions"]]),
{"KPOINTS"})
h = VaspErrorHandler("vasp.real_optlay")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['real_optlay'])
self.assertEqual(d["actions"],
[{'action': {'_set': {'LREAL': False}},
'dict': 'INCAR'}])
subdir = os.path.join(test_dir, "large_cell_real_optlay")
os.chdir(subdir)
shutil.copy("INCAR", "INCAR.orig")
h = VaspErrorHandler()
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['real_optlay'])
vi = VaspInput.from_directory(".")
self.assertEqual(vi["INCAR"]["LREAL"], True)
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['real_optlay'])
vi = VaspInput.from_directory(".")
self.assertEqual(vi["INCAR"]["LREAL"], False)
shutil.copy("INCAR.orig", "INCAR")
os.remove("INCAR.orig")
os.remove("error.1.tar.gz")
os.remove("error.2.tar.gz")
os.chdir(test_dir)
def test_mesh_symmetry(self):
h = MeshSymmetryErrorHandler("vasp.ibzkpt")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['mesh_symmetry'])
self.assertEqual(d["actions"],
[{'action': {'_set': {'kpoints': [[4, 4, 4]]}},
'dict': 'KPOINTS'}])
def test_dentet(self):
h = VaspErrorHandler("vasp.dentet")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['dentet'])
self.assertEqual(d["actions"],
[{'action': {'_set': {'ISMEAR': 0}},
'dict': 'INCAR'}])
def test_brmix(self):
h = VaspErrorHandler("vasp.brmix")
self.assertEqual(h.check(), True)
# The first correction (no good OUTCAR available): check IMIX
d = h.correct()
self.assertEqual(d["errors"], ['brmix'])
vi = VaspInput.from_directory(".")
self.assertEqual(vi["INCAR"]["IMIX"], 1)
self.assertTrue(os.path.exists("CHGCAR"))
# The next correction checks Gamma centering and k-point parity
h.correct()
vi = VaspInput.from_directory(".")
self.assertFalse("IMIX" in vi["INCAR"])
self.assertTrue(os.path.exists("CHGCAR"))
if vi["KPOINTS"].style == Kpoints.supported_modes.Gamma and vi["KPOINTS"].num_kpts < 1:
all_kpts_even = all([
bool(n % 2 == 0) for n in vi["KPOINTS"].kpts[0]
])
self.assertFalse(all_kpts_even)
# The next correction checks ISYM and that CHGCAR has been removed
h.correct()
vi = VaspInput.from_directory(".")
self.assertEqual(vi["INCAR"]["ISYM"], 0)
self.assertFalse(os.path.exists("CHGCAR"))
shutil.copy("INCAR.nelect", "INCAR")
h = VaspErrorHandler("vasp.brmix")
self.assertEqual(h.check(), False)
d = h.correct()
self.assertEqual(d["errors"], [])
def test_too_few_bands(self):
os.chdir(os.path.join(test_dir, "too_few_bands"))
shutil.copy("INCAR", "INCAR.orig")
h = VaspErrorHandler("vasp.too_few_bands")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['too_few_bands'])
self.assertEqual(d["actions"],
[{'action': {'_set': {'NBANDS': 501}},
'dict': 'INCAR'}])
clean_dir()
shutil.move("INCAR.orig", "INCAR")
os.chdir(test_dir)
def test_rot_matrix(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
subdir = os.path.join(test_dir, "poscar_error")
os.chdir(subdir)
shutil.copy("KPOINTS", "KPOINTS.orig")
h = VaspErrorHandler()
h.check()
d = h.correct()
self.assertEqual(d["errors"], ["rot_matrix"])
os.remove(os.path.join(subdir, "error.1.tar.gz"))
shutil.copy("KPOINTS.orig", "KPOINTS")
os.remove("KPOINTS.orig")
def test_to_from_dict(self):
h = VaspErrorHandler("random_name")
h2 = VaspErrorHandler.from_dict(h.as_dict())
self.assertEqual(type(h2), type(h))
self.assertEqual(h2.output_filename, "random_name")
def test_pssyevx(self):
h = VaspErrorHandler("vasp.pssyevx")
self.assertEqual(h.check(), True)
self.assertEqual(h.correct()["errors"], ["pssyevx"])
i = Incar.from_file("INCAR")
self.assertEqual(i["ALGO"], "Normal")
def test_eddrmm(self):
h = VaspErrorHandler("vasp.eddrmm")
self.assertEqual(h.check(), True)
self.assertEqual(h.correct()["errors"], ["eddrmm"])
i = Incar.from_file("INCAR")
self.assertEqual(i["ALGO"], "Normal")
self.assertEqual(h.correct()["errors"], ["eddrmm"])
i = Incar.from_file("INCAR")
self.assertEqual(i["POTIM"], 0.25)
def test_nicht_konv(self):
h = VaspErrorHandler("vasp.nicht_konvergent")
h.natoms_large_cell = 5
self.assertEqual(h.check(), True)
self.assertEqual(h.correct()["errors"], ["nicht_konv"])
i = Incar.from_file("INCAR")
self.assertEqual(i["LREAL"], True)
def test_edddav(self):
h = VaspErrorHandler("vasp.edddav")
self.assertEqual(h.check(), True)
self.assertEqual(h.correct()["errors"], ["edddav"])
self.assertFalse(os.path.exists("CHGCAR"))
def test_gradient_not_orthogonal(self):
h = VaspErrorHandler("vasp.gradient_not_orthogonal")
self.assertEqual(h.check(), True)
self.assertEqual(h.correct()["errors"], ["grad_not_orth"])
i = Incar.from_file("INCAR")
self.assertEqual(i["ISMEAR"], 0)
def tearDown(self):
os.chdir(test_dir)
shutil.move("INCAR.orig", "INCAR")
shutil.move("KPOINTS.orig", "KPOINTS")
shutil.move("POSCAR.orig", "POSCAR")
shutil.move("CHGCAR.orig", "CHGCAR")
clean_dir()
os.chdir(cwd)
class AliasingErrorHandlerTest(unittest.TestCase):
def setUp(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
os.chdir(test_dir)
shutil.copy("INCAR", "INCAR.orig")
shutil.copy("KPOINTS", "KPOINTS.orig")
shutil.copy("POSCAR", "POSCAR.orig")
shutil.copy("CHGCAR", "CHGCAR.orig")
def test_aliasing(self):
os.chdir(os.path.join(test_dir, "aliasing"))
shutil.copy("INCAR", "INCAR.orig")
h = AliasingErrorHandler("vasp.aliasing")
h.check()
d = h.correct()
shutil.move("INCAR.orig", "INCAR")
clean_dir()
os.chdir(test_dir)
self.assertEqual(d["errors"], ['aliasing'])
self.assertEqual(d["actions"],
[{'action': {'_set': {'NGX': 34}},
'dict': 'INCAR'}, {"file": "CHGCAR",
"action": {"_file_delete": {'mode': "actual"}}},
{"file": "WAVECAR",
"action": {"_file_delete": {'mode': "actual"}}}])
def test_aliasing_incar(self):
os.chdir(os.path.join(test_dir, "aliasing"))
shutil.copy("INCAR", "INCAR.orig")
h = AliasingErrorHandler("vasp.aliasing_incar")
h.check()
d = h.correct()
self.assertEqual(d["errors"], ['aliasing_incar'])
self.assertEqual(d["actions"],
[{'action': {'_unset': {'NGY':1, 'NGZ': 1}},
'dict': 'INCAR'}, {"file": "CHGCAR",
"action": {"_file_delete": {'mode': "actual"}}},
{"file": "WAVECAR",
"action": {"_file_delete": {'mode': "actual"}}}])
incar = Incar.from_file('INCAR.orig')
incar["ICHARG"] = 10
incar.write_file("INCAR")
d = h.correct()
self.assertEqual(d["errors"], ['aliasing_incar'])
self.assertEqual(d["actions"],
[{'action': {'_unset': {'NGY': 1, 'NGZ': 1}},
'dict': 'INCAR'}])
shutil.move("INCAR.orig", "INCAR")
clean_dir()
os.chdir(test_dir)
def tearDown(self):
os.chdir(test_dir)
shutil.move("INCAR.orig", "INCAR")
shutil.move("KPOINTS.orig", "KPOINTS")
shutil.move("POSCAR.orig", "POSCAR")
shutil.move("CHGCAR.orig", "CHGCAR")
clean_dir()
os.chdir(cwd)
class UnconvergedErrorHandlerTest(unittest.TestCase):
def setUp(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
os.chdir(test_dir)
def test_check_correct(self):
subdir = os.path.join(test_dir, "unconverged")
os.chdir(subdir)
shutil.copy("INCAR", "INCAR.orig")
shutil.copy("KPOINTS", "KPOINTS.orig")
shutil.copy("POSCAR", "POSCAR.orig")
shutil.copy("CONTCAR", "CONTCAR.orig")
h = UnconvergedErrorHandler()
self.assertTrue(h.check())
d = h.correct()
self.assertEqual(d["errors"], ['Unconverged'])
os.remove(os.path.join(subdir, "error.1.tar.gz"))
shutil.move("INCAR.orig", "INCAR")
shutil.move("KPOINTS.orig", "KPOINTS")
shutil.move("POSCAR.orig", "POSCAR")
shutil.move("CONTCAR.orig", "CONTCAR")
def test_to_from_dict(self):
h = UnconvergedErrorHandler("random_name.xml")
h2 = UnconvergedErrorHandler.from_dict(h.as_dict())
self.assertEqual(type(h2), UnconvergedErrorHandler)
self.assertEqual(h2.output_filename, "random_name.xml")
@classmethod
def tearDownClass(cls):
os.chdir(cwd)
class ZpotrfErrorHandlerTest(unittest.TestCase):
def setUp(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
os.chdir(test_dir)
os.chdir('zpotrf')
shutil.copy("POSCAR", "POSCAR.orig")
shutil.copy("INCAR", "INCAR.orig")
def test_first_step(self):
shutil.copy("OSZICAR.empty", "OSZICAR")
s1 = Structure.from_file("POSCAR")
h = VaspErrorHandler("vasp.out")
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d['errors'], ['zpotrf'])
s2 = Structure.from_file("POSCAR")
self.assertAlmostEqual(s2.volume, s1.volume * 1.2 ** 3, 3)
def test_potim_correction(self):
shutil.copy("OSZICAR.one_step", "OSZICAR")
s1 = Structure.from_file("POSCAR")
h = VaspErrorHandler("vasp.out")
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d['errors'], ['zpotrf'])
s2 = Structure.from_file("POSCAR")
self.assertAlmostEqual(s2.volume, s1.volume, 3)
self.assertAlmostEqual(Incar.from_file("INCAR")['POTIM'], 0.25)
def tearDown(self):
os.chdir(test_dir)
os.chdir('zpotrf')
shutil.move("POSCAR.orig", "POSCAR")
shutil.move("INCAR.orig", "INCAR")
os.remove("OSZICAR")
clean_dir()
os.chdir(cwd)
class MaxForceErrorHandlerTest(unittest.TestCase):
def setUp(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
os.chdir(test_dir)
def test_check_correct(self):
#NOTE: the vasprun here has had projected and partial eigenvalues removed
subdir = os.path.join(test_dir, "max_force")
os.chdir(subdir)
shutil.copy("INCAR", "INCAR.orig")
shutil.copy("POSCAR", "POSCAR.orig")
h = MaxForceErrorHandler()
self.assertTrue(h.check())
d = h.correct()
self.assertEqual(d["errors"], ['MaxForce'])
os.remove(os.path.join(subdir, "error.1.tar.gz"))
incar = Incar.from_file('INCAR')
poscar = Poscar.from_file('POSCAR')
contcar = Poscar.from_file('CONTCAR')
shutil.move("INCAR.orig", "INCAR")
shutil.move("POSCAR.orig", "POSCAR")
self.assertEqual(poscar.structure, contcar.structure)
self.assertAlmostEqual(incar['EDIFFG'], 0.005)
def tearDown(self):
os.chdir(cwd)
class WalltimeHandlerTest(unittest.TestCase):
def setUp(self):
os.chdir(test_dir)
def test_check_and_correct(self):
# The test OSZICAR file has 60 ionic steps. Let's try a 1 hr wall
# time with a 1min buffer
h = WalltimeHandler(wall_time=3600, buffer_time=120)
self.assertFalse(h.check())
# This makes sure the check returns True when the time left is less
# than the buffer time.
h.start_time = datetime.datetime.now() - datetime.timedelta(minutes=59)
self.assertTrue(h.check())
# This makes sure the check returns True when the time left is less
# than 3 x the average time per ionic step. We have a 62 min wall
# time, a very short buffer time, but the start time was 62 mins ago
h = WalltimeHandler(wall_time=3720, buffer_time=10)
h.start_time = datetime.datetime.now() - datetime.timedelta(minutes=62)
self.assertTrue(h.check())
# Test that the STOPCAR is written correctly.
h.correct()
with open("STOPCAR") as f:
content = f.read()
self.assertEqual(content, "LSTOP = .TRUE.")
os.remove("STOPCAR")
h = WalltimeHandler(wall_time=3600, buffer_time=120,
electronic_step_stop=True)
self.assertFalse(h.check())
h.start_time = datetime.datetime.now() - datetime.timedelta(minutes=59)
self.assertTrue(h.check())
h.correct()
with open("STOPCAR") as f:
content = f.read()
self.assertEqual(content, "LABORT = .TRUE.")
os.remove("STOPCAR")
@classmethod
def tearDownClass(cls):
os.chdir(cwd)
class PositiveEnergyHandlerTest(unittest.TestCase):
def setUp(self):
os.chdir(test_dir)
def test_check_correct(self):
subdir = os.path.join(test_dir, "positive_energy")
os.chdir(subdir)
shutil.copy("INCAR", "INCAR.orig")
shutil.copy("POSCAR", "POSCAR.orig")
h = PositiveEnergyErrorHandler()
self.assertTrue(h.check())
d = h.correct()
self.assertEqual(d["errors"], ['Positive energy'])
os.remove(os.path.join(subdir, "error.1.tar.gz"))
incar = Incar.from_file('INCAR')
shutil.move("INCAR.orig", "INCAR")
shutil.move("POSCAR.orig", "POSCAR")
self.assertEqual(incar['ALGO'], 'Normal')
@classmethod
def tearDownClass(cls):
os.chdir(cwd)
class PotimHandlerTest(unittest.TestCase):
def setUp(self):
os.chdir(test_dir)
def test_check_correct(self):
subdir = os.path.join(test_dir, "potim")
os.chdir(subdir)
shutil.copy("INCAR", "INCAR.orig")
shutil.copy("POSCAR", "POSCAR.orig")
incar = Incar.from_file('INCAR')
original_potim = incar['POTIM']
h = PotimErrorHandler()
self.assertTrue(h.check())
d = h.correct()
self.assertEqual(d["errors"], ['POTIM'])
os.remove(os.path.join(subdir, "error.1.tar.gz"))
incar = Incar.from_file('INCAR')
new_potim = incar['POTIM']
shutil.move("INCAR.orig", "INCAR")
shutil.move("POSCAR.orig", "POSCAR")
self.assertEqual(original_potim, new_potim)
self.assertEqual(incar['IBRION'], 3)
@classmethod
def tearDownClass(cls):
os.chdir(cwd)
class LrfCommHandlerTest(unittest.TestCase):
def setUp(self):
os.chdir(test_dir)
os.chdir('lrf_comm')
for f in ["INCAR", "OUTCAR", "std_err.txt"]:
shutil.copy(f, f+".orig")
def test_lrf_comm(self):
h = StdErrHandler("std_err.txt")
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['lrf_comm'])
vi = VaspInput.from_directory(".")
self.assertEqual(vi["INCAR"]["ISTART"], 1)
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['lrf_comm'])
self.assertEqual(d["actions"], []) # don't correct twice
def tearDown(self):
os.chdir(test_dir)
os.chdir('lrf_comm')
for f in ["INCAR", "OUTCAR", "std_err.txt"]:
shutil.move(f+".orig", f)
clean_dir()
os.chdir(cwd)
class KpointsTransHandlerTest(unittest.TestCase):
def setUp(self):
os.chdir(test_dir)
shutil.copy("KPOINTS", "KPOINTS.orig")
def test_kpoints_trans(self):
h = StdErrHandler("std_err.txt.kpoints_trans")
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['kpoints_trans'])
self.assertEqual(d["actions"],
[{u'action': {u'_set':
{u'kpoints': [[4, 4, 4]]}},
u'dict': u'KPOINTS'}])
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['kpoints_trans'])
self.assertEqual(d["actions"], []) # don't correct twice
def tearDown(self):
shutil.move("KPOINTS.orig", "KPOINTS")
clean_dir()
os.chdir(cwd)
class OutOfMemoryHandlerTest(unittest.TestCase):
def setUp(self):
os.chdir(test_dir)
shutil.copy("INCAR", "INCAR.orig")
def test_oom(self):
vi = VaspInput.from_directory(".")
from custodian.vasp.interpreter import VaspModder
VaspModder(vi=vi).apply_actions([{"dict": "INCAR",
"action": {"_set": {"KPAR": 4}}}])
h = StdErrHandler("std_err.txt.oom", correct_out_of_memory=True)
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['out_of_memory'])
self.assertEqual(d["actions"],
[{'dict': 'INCAR',
'action': {'_set': {'KPAR': 2}}}])
h = StdErrHandler("std_err.txt.oom2", correct_out_of_memory=True)
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['out_of_memory'])
self.assertEqual(d["actions"],
[{'dict': 'INCAR',
'action': {'_set': {'KPAR': 1}}}])
vi = VaspInput.from_directory(".")
from custodian.vasp.interpreter import VaspModder
VaspModder(vi=vi).apply_actions([{"dict": "INCAR",
"action": {"_set": {"KPAR": 4}}}])
h = StdErrHandler("std_err.txt.emlsp", correct_out_of_memory=True)
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['out_of_memory'])
self.assertEqual(d["actions"],
[{'dict': 'INCAR',
'action': {'_set': {'KPAR': 2}}}])
vi = VaspInput.from_directory(".")
from custodian.vasp.interpreter import VaspModder
VaspModder(vi=vi).apply_actions([{"dict": "INCAR",
"action": {"_set": {"KPAR": 4}}}])
h = StdErrHandler("std_err.txt.insufficient_mem", correct_out_of_memory=True)
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['out_of_memory'])
self.assertEqual(d["actions"],
[{'dict': 'INCAR',
'action': {'_set': {'KPAR': 2}}}])
def test_seg_fault(self):
vi = VaspInput.from_directory(".")
from custodian.vasp.interpreter import VaspModder
VaspModder(vi=vi).apply_actions([{"dict": "INCAR",
"action": {"_set": {"KPAR": 4}}}])
h = StdErrHandler("std_err.txt.seg_fault", correct_seg_fault=True)
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['seg_fault'])
self.assertEqual(d["actions"],
[{'dict': 'INCAR',
'action': {'_set': {'ISMEAR': '0'}}}])
self.assertEqual(h.check(), True)
d = h.correct()
self.assertEqual(d["errors"], ['seg_fault'])
# no fix if still seg fault
self.assertEqual(d["actions"], [])
def tearDown(self):
shutil.move("INCAR.orig", "INCAR")
clean_dir()
os.chdir(cwd)
if __name__ == "__main__":
unittest.main()
|
|
# Views for Parkmap
from django.contrib.sites.models import Site
from django.contrib.gis.measure import D
import json
from django.utils import simplejson
from django.core.mail import send_mail
from django.template.defaultfilters import slugify
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
from parkmap.models import Neighborhood, Park, Facility, Activity, Event, Parktype, Story, Facilitytype
from forms import StoryForm
from django.template import RequestContext
from django.conf import settings
from mbta.models import MBTAStop
import cgpolyencode
current_site = Site.objects.get_current()
#Temporary view to see Play page
def play_page(request):
neighborhoods = Neighborhood.objects.all().order_by('name')
activities = Activity.objects.all().order_by('name')
response_d = {
'neighborhoods': neighborhoods,
'activities': activities,
}
return render_to_response('parkmap/play.html', response_d,
context_instance=RequestContext(request))
def get_list():
# Query each of the three classes.
parks = Park.objects.all()
facilities = Facility.objects.all()
neighborhoods = Neighborhood.objects.all().order_by('name')
return parks, facilities, neighborhoods
#Home page
def home_page(request):
parks, facilities, neighborhoods = get_list()
all_parks = Park.objects.all().order_by('name')
activities = Activity.objects.all()
stories = Story.objects.all().order_by('-date')[:6]
return render_to_response('parkmap/home.html', {
'parks': parks,
'all_parks': all_parks,
'facilities': facilities,
'activities': activities,
'neighborhoods': neighborhoods,
'stories':stories,
}, context_instance=RequestContext(request))
def parks_page(request, park_slug):
park = get_object_or_404(Park, slug=park_slug)
encoder = cgpolyencode.GPolyEncoder()
coordinates = simplejson.loads(park.geometry.geojson)
map = encoder.encode(coordinates['coordinates'][0][0])
stories = Story.objects.filter(park=park).order_by("-date")
#stops = MBTAStop.objects.filter(lat_long__distance_lte=(park.geometry.centroid,D(mi=settings.MBTA_DISTANCE))) # this distance doesn't overload the page with a million stops.
if request.method == 'POST':
story = Story()
f = StoryForm(request.POST, instance=story)
if f.is_valid():
story.park = park
f.save()
f = StoryForm()
else:
f = StoryForm()
return render_to_response('parkmap/park.html',
{'park': park,
'map': map,
#'stops': stops,
'story_form': f,
'stories': stories,
'request': request,
'acres': park.geometry.area * 0.000247,
},
context_instance=RequestContext(request)
)
def neighborhood(request, n_slug): # Neighborhood slug
neighborhood = Neighborhood.objects.get(slug=n_slug)
parks = Park.objects.filter(neighborhoods=neighborhood)
response_d = {
'neighborhood': neighborhood,
'parks': parks,
}
return render_to_response('parkmap/neighborhood.html',
response_d,
context_instance=RequestContext(request)
)
def parks_in_neighborhood_with_activities(request, a_slug, n_slug): # Activity slug, and Neighborhood slug
activities = Activity.objects.all()
activity = Activity.objects.get(slug=a_slug)
neighborhood, parks = get_n_p_with_a(n_slug, a_slug)
response_d = {
'neighborhood': neighborhood,
'activities': activities,
'activity': activity,
'a_slug': a_slug,
'parks': parks}
return render_to_response('parkmap/play.html',
response_d,
context_instance=RequestContext(request)
)
def get_n_p_with_a(n_slug, a_slug):
"""
Get parks in a neighborhood that offer the specified activity at any of
their facilities. Returns the neighborhood (or all neighborhoods if
n_slug == 'all') and the matching parks.
"""
a = get_object_or_404(Activity, slug=a_slug)
fac = Facility.objects.filter(activity=a)
if n_slug == 'all':
n = Neighborhood.objects.all()
else:
n = get_object_or_404(Neighborhood, slug=n_slug)
facility_ids = []
for f in fac:
facility_ids.append(f.id)
p = Park.objects.filter(neighborhoods=n, facility__id__in=facility_ids)
return n, p
def neighborhood_activity_ajax(request, n_slug, a_slug):
"""
Returns a json string of parks with activities in the specified neighborhood
"""
try:
n, parks = get_n_p_with_a(n_slug, a_slug)
except Http404:
return HttpResponse("{}")
parks_json = []
for park in parks:
p_dict = {}
p_dict['activity'] = []
for f in park.facility_set.all():
for a in f.activity.all():
p_dict['activity'].append({'slug': a.slug})
p_dict['name'] = park.name
p_dict['slug'] = park.slug
p_dict['description'] = park.description
parks_json.append(p_dict)
return HttpResponse(json.dumps(parks_json))
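# Illustrative sketch of the payload produced by neighborhood_activity_ajax,
# derived from the loop above (the field values themselves are hypothetical):
#
#     [
#         {
#             "activity": [{"slug": "swimming"}, {"slug": "picnicking"}],
#             "name": "Example Park",
#             "slug": "example-park",
#             "description": "A short description of the park."
#         }
#     ]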
def events(request, event_id, event_name):
event = get_object_or_404(Event, pk=event_id)
return render_to_response('parkmap/event.html', {'event': event})
def explore(request):
parkname = request.POST.get('parkname',None)
neighborhoods = Neighborhood.objects.all().order_by('name')
#activities = Activity.objects.all().order_by('name')
parks = Park.objects.all().order_by('name')
facilitytypes = Facilitytype.objects.all().order_by('name')
neighborhood_slug = request.GET.get('neighborhood', None)
neighborhood = None
if neighborhood_slug:
neighborhood = Neighborhood.objects.get(slug=neighborhood_slug)
response_d = {
'neighborhoods': neighborhoods,
'neighborhoodpassed': neighborhood,
'facilitytypes':facilitytypes,
'parks':parks,
'parkname':parkname,
}
return render_to_response('parkmap/explore.html',
response_d,
context_instance=RequestContext(request)
)
def plan_a_trip(request):
return render_to_response('parkmap/trip.html',
{ },
context_instance=RequestContext(request)
)
def story(request, story_id):
story = get_object_or_404(Story, id=story_id)
return render_to_response('parkmap/story.html',
dict(story=story), context_instance=RequestContext(request))
def story_flag(request,story_id):
story = Story.objects.get(pk=story_id)
if not story.objectionable_content:
MESSAGE = """
A user of the Boston Parks website has flagged this story as objectionable.
Here is the story:
{story}
Link to Admin: http://{domain}/admin/parkmap/story/{id}
""".format(story=story.text,domain=current_site.domain,id=story.id)
emails = []
for admin in settings.ADMINS:
emails.append(admin[1])
send_mail('Flagged Story on the BostonParks website', MESSAGE, '[email protected]',
emails, fail_silently=False)
story.objectionable_content = True
story.save()
return HttpResponse("")
def policy(request):
return render_to_response('parkmap/policy.html',
{}, context_instance=RequestContext(request))
def home_search(request):
if request.method == "POST":
parkname = request.POST.get("parkname",None)
if parkname:
parkname = slugify(parkname)
try:
park = Park.objects.get(slug=parkname)
if park:
return HttpResponseRedirect("/park/%s/" % parkname)
except Park.DoesNotExist:
pass
return HttpResponseRedirect("/")
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=unused-import
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
The weights for all 16 models are obtained and translated
from TensorFlow checkpoints found at
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md
# Reference
- [MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion
from tensorflow.python.keras._impl.keras.engine.network import get_source_inputs
from tensorflow.python.keras._impl.keras.layers import Activation
from tensorflow.python.keras._impl.keras.layers import BatchNormalization
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dropout
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import Reshape
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'
def relu6(x):
return K.relu(x, max_value=6)
@tf_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
Arguments:
x: a 4D numpy array consisting of RGB values within [0, 255].
Returns:
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf')
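# A minimal sketch (defined here for illustration only and never called by
# Keras) of what the 'tf' preprocessing mode above does: pixel values in
# [0, 255] are rescaled to the [-1, 1] range expected by MobileNet.  The
# random batch below is purely illustrative.
def _example_preprocess_input():
  import numpy as np
  x = np.random.uniform(0, 255, size=(2, 224, 224, 3))
  y = preprocess_input(x)
  assert y.min() >= -1.0 and y.max() <= 1.0
  return y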
class DepthwiseConv2D(Conv2D):
"""Depthwise separable 2D convolution.
Depthwise separable convolutions consist of performing
just the first step of a depthwise spatial convolution
(which acts on each input channel separately).
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Arguments:
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(i.e. 'linear' activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation').
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`[batch, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`[batch, filters, new_rows, new_cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
@shape_type_conversion
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],
input_dim, self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs, training=None):
outputs = K.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
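# A minimal sketch (defined here for illustration only and never called by
# Keras) of the layer above with `channels_last` data: ``depth_multiplier=2``
# on a 3-channel input yields 3 * 2 = 6 output channels, while 'same' padding
# and unit strides preserve the spatial size.  The 32x32 input is arbitrary.
def _example_depthwise_conv2d():
  inputs = Input(shape=(32, 32, 3))
  outputs = DepthwiseConv2D((3, 3), depth_multiplier=2, padding='same')(inputs)
  model = Model(inputs, outputs)
  assert model.output_shape[-1] == 6
  return model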
@tf_export('keras.applications.MobileNet',
'keras.applications.mobilenet.MobileNet')
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
Arguments:
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only TensorFlow backend is currently supported, '
'as other backends do not support '
'depthwise convolution.')
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if K.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
if K.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
                       'alpha can be one of '
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
raise ValueError('If imagenet weights are being loaded, '
'input must have a static square shape (one of '
'(128,128), (160,160), (192,192), or (224, 224)).'
' Input shape provided = %s' % (input_shape,))
if K.image_data_format() != 'channels_last':
logging.warning('The MobileNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if K.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
      raise ValueError('Weights for "channels_first" data format '
                       'are not available.')
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
    if include_top:
      model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
      weight_path = BASE_WEIGHT_PATH + model_name
      weights_path = get_file(model_name, weight_path, cache_subdir='models')
    else:
      model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
      weight_path = BASE_WEIGHT_PATH + model_name
      weights_path = get_file(model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
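# Illustrative sketch, not part of the original module: creating an ImageNet
# MobileNet, saving it, and reloading it with the custom objects mentioned in
# the docstring above. `load_model` is passed in by the caller (for example
# `keras.models.load_model`); the file name 'mobilenet.h5' is made up.
def _example_mobilenet_roundtrip(load_model):
  """Create, save and reload a MobileNet (sketch only)."""
  model = MobileNet(weights='imagenet', alpha=1.0, include_top=True)
  model.save('mobilenet.h5')
  return load_model('mobilenet.h5',
                    custom_objects={'relu6': relu6,
                                    'DepthwiseConv2D': DepthwiseConv2D})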
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Arguments:
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
        It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = Conv2D(
filters,
kernel,
padding='same',
use_bias=False,
strides=strides,
name='conv1')(
inputs)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return Activation(relu6, name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Arguments:
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
x = DepthwiseConv2D( # pylint: disable=not-callable
(3, 3),
padding='same',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
inputs)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
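# Illustrative sketch, not part of the original module: composing the two helper
# blocks above into a tiny two-block stem, the same pattern MobileNet follows.
# The input size and filter counts are arbitrary choices for the example.
def _example_stem():
  """Chain a conv block and a depthwise conv block (sketch only)."""
  img_input = Input(shape=(64, 64, 3))
  x = _conv_block(img_input, 8, alpha=1.0, strides=(2, 2))
  x = _depthwise_conv_block(x, 16, alpha=1.0, depth_multiplier=1, block_id=1)
  return Model(img_input, x)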
|
|
import regex as re
import itertools
from collections import defaultdict
def exact_barcode_filter(chunk, bc1, bc2, bc3):
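    """Keep only reads whose cell barcode components all appear in the
    corresponding allowed barcode sets (bc1, and optionally bc2/bc3)."""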
parser_re = re.compile('(.*):CELL_(?P<CB>.*):UMI_(.*)\\n(.*)\\n\\+\\n(.*)\\n')
kept = []
for read in chunk:
match = parser_re.search(read).groupdict()
cb1 = match['CB']
if bc3:
cb1, cb2, cb3 = cb1.split("-")
elif bc2:
cb1, cb2 = cb1.split("-")
if cb1 not in bc1:
continue
if bc2 and cb2 not in bc2:
continue
if bc3 and cb3 not in bc3:
continue
kept.append(read)
return kept
def correcting_barcode_filter(chunk, bc1hash, bc2hash, bc3hash):
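    """Keep reads whose cell barcode components can each be corrected
    unambiguously via the given mutation hashes, rewriting the CELL_ field
    of the read when a correction is applied."""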
parser_re = re.compile('(.*):CELL_(?P<CB>.*):UMI_(.*)\\n(.*)\\n\\+\\n(.*)\\n')
kept = []
for read in chunk:
match = parser_re.search(read).groupdict()
cb1 = match['CB']
if bc3hash:
cb1, cb2, cb3 = cb1.split("-")
elif bc2hash:
cb1, cb2 = cb1.split("-")
bc1corrected = bc1hash[cb1]
if not bc1corrected:
continue
if bc2hash:
bc2corrected = bc2hash[cb2]
if not bc2corrected:
continue
if bc3hash:
bc3corrected = bc3hash[cb3]
if not bc3corrected:
continue
if bc3hash:
correctbc = bc1corrected + "-" + bc2corrected + "-" + bc3corrected
elif bc2hash:
correctbc = bc1corrected + "-" + bc2corrected
else:
correctbc = bc1corrected
if correctbc == match['CB']:
kept.append(read)
else:
read = read.replace("CELL_" + match['CB'], "CELL_" + correctbc)
kept.append(read)
return kept
def exact_sample_filter2(chunk, barcodes):
parser_re = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
kept = []
for read in chunk:
match = parser_re.search(read).groupdict()
sample = match['SB']
if sample not in barcodes:
continue
kept.append(read)
return kept
def correcting_sample_filter2(chunk, barcodehash):
parser_re = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
kept = []
for read in chunk:
match = parser_re.search(read).groupdict()
sample = match['SB']
barcodecorrected = barcodehash[sample]
if not barcodecorrected:
continue
correctbc = barcodecorrected
if correctbc == match['SB']:
kept.append(read)
else:
read = read.replace("SAMPLE_" + match['SB'], "SAMPLE_" + correctbc)
kept.append(read)
return kept
def exact_sample_filter(read, barcodes):
parser_re = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
match = parser_re.search(read).groupdict()
sample = match['SB']
if sample not in barcodes:
return None
return read
def umi_filter(chunk):
parser_re = re.compile('(.*):CELL_(.*):UMI_(?P<MB>.*):SAMPLE_(.*)\\n(.*)\\n\\+\\n(.*)\\n')
kept = []
for read in chunk:
match = parser_re.search(read).groupdict()
MB = match['MB']
if not acgt_match(MB):
continue
else:
kept.append(read)
return kept
def append_uids(chunk):
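    """Append a :UID_<sample><cell><umi> tag to each read name, placed
    directly after the SAMPLE_ field."""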
parser_re = re.compile('(.*):CELL_(?P<CB>.*):UMI_(?P<MB>.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
kept = []
for read in chunk:
match = parser_re.search(read).groupdict()
CB = match['CB']
MB = match['MB']
SB = match['SB']
sample = "SAMPLE_"+ match['SB']
idx = read.find(sample)+len(sample)
read = read[:idx]+":UID_" + SB + CB + MB+ read[idx:]
kept.append(read)
return kept
def correcting_sample_filter(read, barcodehash):
parser_re = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
match = parser_re.search(read).groupdict()
sample = match['SB']
barcodecorrected = barcodehash[sample]
if not barcodecorrected:
return None
correctbc = barcodecorrected
if correctbc == match['SB']:
return(read)
else:
read = read.replace("SAMPLE_" + match['SB'], "SAMPLE_" + correctbc)
return(read)
class MutationHash(object):
def __init__(self, strings, nedit):
self.hash = mutationhash(strings, nedit)
def __getitem__(self, barcode):
result = self.hash[barcode]
if len(result) != 1:
return None
else:
return list(result)[0]
def mutationhash(strings, nedit):
"""
produce a hash with each key a nedit distance substitution for a set of
strings. values of the hash is the set of strings the substitution could
have come from
"""
maxlen = max([len(string) for string in strings])
indexes = generate_idx(maxlen, nedit)
muthash = defaultdict(set)
for string in strings:
muthash[string].update([string])
for x in substitution_set(string, indexes):
muthash[x].update([string])
return muthash
def substitution_set(string, indexes):
"""
for a string, return a set of all possible substitutions
"""
strlen = len(string)
return {mutate_string(string, x) for x in indexes if valid_substitution(strlen, x)}
def valid_substitution(strlen, index):
"""
skip performing substitutions that are outside the bounds of the string
"""
values = index[0]
return all([strlen > i for i in values])
def generate_idx(maxlen, nedit):
"""
generate all possible nedit edits of a string. each item has the form
((index1, index2), 'A', 'G') for nedit=2
index1 will be replaced by 'A', index2 by 'G'
this covers all edits < nedit as well since some of the specified
substitutions will not change the base
"""
ALPHABET = ["A", "C", "G", "T", "N"]
    ALPHABETS = [ALPHABET for _ in range(nedit)]
return list(itertools.product(itertools.combinations(range(maxlen), nedit),
*ALPHABETS))
def acgt_match(string):
"""
    returns True if string consists of only "A", "C", "G", "T"
"""
search = re.compile(r'[^ACGT]').search
return not bool(search(string))
def mutate_string(string, tomutate):
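    """Apply one substitution tuple produced by generate_idx() to the string:
    each index in tomutate[0] is replaced by the corresponding base."""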
strlist = list(string)
for i, idx in enumerate(tomutate[0]):
strlist[idx] = tomutate[i+1]
return "".join(strlist)
|
|
"""
Provide functionality to keep track of devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/device_tracker/
"""
# pylint: disable=too-many-instance-attributes, too-many-arguments
# pylint: disable=too-many-locals
import asyncio
from datetime import timedelta
import logging
import os
import threading
from typing import Any, Sequence, Callable
import voluptuous as vol
import yaml
from homeassistant.bootstrap import (
prepare_setup_platform, log_exception)
from homeassistant.components import group, zone
from homeassistant.components.discovery import SERVICE_NETGEAR
from homeassistant.config import load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import GPSType, ConfigType, HomeAssistantType
import homeassistant.helpers.config_validation as cv
import homeassistant.util as util
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.const import (
ATTR_GPS_ACCURACY, ATTR_LATITUDE, ATTR_LONGITUDE,
DEVICE_DEFAULT_NAME, STATE_HOME, STATE_NOT_HOME)
DOMAIN = 'device_tracker'
DEPENDENCIES = ['zone']
GROUP_NAME_ALL_DEVICES = 'all devices'
ENTITY_ID_ALL_DEVICES = group.ENTITY_ID_FORMAT.format('all_devices')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
YAML_DEVICES = 'known_devices.yaml'
CONF_TRACK_NEW = 'track_new_devices'
DEFAULT_TRACK_NEW = True
CONF_CONSIDER_HOME = 'consider_home'
DEFAULT_CONSIDER_HOME = 180
CONF_SCAN_INTERVAL = 'interval_seconds'
DEFAULT_SCAN_INTERVAL = 12
CONF_AWAY_HIDE = 'hide_if_away'
DEFAULT_AWAY_HIDE = False
SERVICE_SEE = 'see'
ATTR_MAC = 'mac'
ATTR_DEV_ID = 'dev_id'
ATTR_HOST_NAME = 'host_name'
ATTR_LOCATION_NAME = 'location_name'
ATTR_GPS = 'gps'
ATTR_BATTERY = 'battery'
ATTR_ATTRIBUTES = 'attributes'
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SCAN_INTERVAL): cv.positive_int, # seconds
vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean,
vol.Optional(CONF_CONSIDER_HOME,
default=timedelta(seconds=DEFAULT_CONSIDER_HOME)): vol.All(
cv.time_period, cv.positive_timedelta)
})
DISCOVERY_PLATFORMS = {
SERVICE_NETGEAR: 'netgear',
}
_LOGGER = logging.getLogger(__name__)
def is_on(hass: HomeAssistantType, entity_id: str=None):
"""Return the state if any or a specified device is home."""
entity = entity_id or ENTITY_ID_ALL_DEVICES
return hass.states.is_state(entity, STATE_HOME)
# pylint: disable=too-many-arguments
def see(hass: HomeAssistantType, mac: str=None, dev_id: str=None,
host_name: str=None, location_name: str=None,
gps: GPSType=None, gps_accuracy=None,
battery=None, attributes: dict=None):
"""Call service to notify you see device."""
data = {key: value for key, value in
((ATTR_MAC, mac),
(ATTR_DEV_ID, dev_id),
(ATTR_HOST_NAME, host_name),
(ATTR_LOCATION_NAME, location_name),
(ATTR_GPS, gps),
(ATTR_GPS_ACCURACY, gps_accuracy),
(ATTR_BATTERY, battery)) if value is not None}
if attributes:
data[ATTR_ATTRIBUTES] = attributes
hass.services.call(DOMAIN, SERVICE_SEE, data)
def setup(hass: HomeAssistantType, config: ConfigType):
"""Setup device tracker."""
yaml_path = hass.config.path(YAML_DEVICES)
try:
conf = config.get(DOMAIN, [])
except vol.Invalid as ex:
log_exception(ex, DOMAIN, config, hass)
return False
else:
conf = conf[0] if len(conf) > 0 else {}
consider_home = conf.get(CONF_CONSIDER_HOME,
timedelta(seconds=DEFAULT_CONSIDER_HOME))
track_new = conf.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
devices = load_config(yaml_path, hass, consider_home)
tracker = DeviceTracker(hass, consider_home, track_new, devices)
def setup_platform(p_type, p_config, disc_info=None):
"""Setup a device tracker platform."""
platform = prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return
try:
if hasattr(platform, 'get_scanner'):
scanner = platform.get_scanner(hass, {DOMAIN: p_config})
if scanner is None:
_LOGGER.error('Error setting up platform %s', p_type)
return
setup_scanner_platform(hass, p_config, scanner, tracker.see)
return
if not platform.setup_scanner(hass, p_config, tracker.see):
_LOGGER.error('Error setting up platform %s', p_type)
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error setting up platform %s', p_type)
for p_type, p_config in config_per_platform(config, DOMAIN):
setup_platform(p_type, p_config)
def device_tracker_discovered(service, info):
"""Called when a device tracker platform is discovered."""
setup_platform(DISCOVERY_PLATFORMS[service], {}, info)
discovery.listen(hass, DISCOVERY_PLATFORMS.keys(),
device_tracker_discovered)
def update_stale(now):
"""Clean up stale devices."""
tracker.update_stale(now)
track_utc_time_change(hass, update_stale, second=range(0, 60, 5))
tracker.setup_group()
def see_service(call):
"""Service to see a device."""
args = {key: value for key, value in call.data.items() if key in
(ATTR_MAC, ATTR_DEV_ID, ATTR_HOST_NAME, ATTR_LOCATION_NAME,
ATTR_GPS, ATTR_GPS_ACCURACY, ATTR_BATTERY, ATTR_ATTRIBUTES)}
tracker.see(**args)
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_SEE, see_service,
descriptions.get(SERVICE_SEE))
return True
class DeviceTracker(object):
"""Representation of a device tracker."""
def __init__(self, hass: HomeAssistantType, consider_home: timedelta,
track_new: bool, devices: Sequence) -> None:
"""Initialize a device tracker."""
self.hass = hass
self.devices = {dev.dev_id: dev for dev in devices}
self.mac_to_dev = {dev.mac: dev for dev in devices if dev.mac}
for dev in devices:
if self.devices[dev.dev_id] is not dev:
_LOGGER.warning('Duplicate device IDs detected %s', dev.dev_id)
if dev.mac and self.mac_to_dev[dev.mac] is not dev:
_LOGGER.warning('Duplicate device MAC addresses detected %s',
dev.mac)
self.consider_home = consider_home
self.track_new = track_new
self.lock = threading.Lock()
for device in devices:
if device.track:
device.update_ha_state()
self.group = None # type: group.Group
def see(self, mac: str=None, dev_id: str=None, host_name: str=None,
location_name: str=None, gps: GPSType=None, gps_accuracy=None,
battery: str=None, attributes: dict=None):
"""Notify the device tracker that you see a device."""
with self.lock:
if mac is None and dev_id is None:
                raise HomeAssistantError('Neither mac nor device id passed in')
elif mac is not None:
mac = str(mac).upper()
device = self.mac_to_dev.get(mac)
if not device:
dev_id = util.slugify(host_name or '') or util.slugify(mac)
else:
dev_id = cv.slug(str(dev_id).lower())
device = self.devices.get(dev_id)
if device:
device.seen(host_name, location_name, gps, gps_accuracy,
battery, attributes)
if device.track:
device.update_ha_state()
return
# If no device can be found, create it
dev_id = util.ensure_unique_string(dev_id, self.devices.keys())
device = Device(
self.hass, self.consider_home, self.track_new,
dev_id, mac, (host_name or dev_id).replace('_', ' '))
self.devices[dev_id] = device
if mac is not None:
self.mac_to_dev[mac] = device
device.seen(host_name, location_name, gps, gps_accuracy, battery,
attributes)
if device.track:
device.update_ha_state()
# During init, we ignore the group
if self.group is not None:
self.group.update_tracked_entity_ids(
list(self.group.tracking) + [device.entity_id])
update_config(self.hass.config.path(YAML_DEVICES), dev_id, device)
def setup_group(self):
"""Initialize group for all tracked devices."""
run_coroutine_threadsafe(
self.async_setup_group(), self.hass.loop).result()
@asyncio.coroutine
def async_setup_group(self):
"""Initialize group for all tracked devices.
This method must be run in the event loop.
"""
entity_ids = (dev.entity_id for dev in self.devices.values()
if dev.track)
self.group = yield from group.Group.async_create_group(
self.hass, GROUP_NAME_ALL_DEVICES, entity_ids, False)
def update_stale(self, now: dt_util.dt.datetime):
"""Update stale devices."""
with self.lock:
for device in self.devices.values():
if (device.track and device.last_update_home and
device.stale(now)):
device.update_ha_state(True)
class Device(Entity):
"""Represent a tracked device."""
host_name = None # type: str
location_name = None # type: str
gps = None # type: GPSType
gps_accuracy = 0
last_seen = None # type: dt_util.dt.datetime
battery = None # type: str
attributes = None # type: dict
# Track if the last update of this device was HOME.
last_update_home = False
_state = STATE_NOT_HOME
def __init__(self, hass: HomeAssistantType, consider_home: timedelta,
track: bool, dev_id: str, mac: str, name: str=None,
picture: str=None, gravatar: str=None,
hide_if_away: bool=False) -> None:
"""Initialize a device."""
self.hass = hass
self.entity_id = ENTITY_ID_FORMAT.format(dev_id)
        # Timedelta for how long we consider a device to be home after it is
        # no longer detected.
self.consider_home = consider_home
# Device ID
self.dev_id = dev_id
self.mac = mac
# If we should track this device
self.track = track
# Configured name
self.config_name = name
# Configured picture
if gravatar is not None:
self.config_picture = get_gravatar_for_email(gravatar)
else:
self.config_picture = picture
self.away_hide = hide_if_away
@property
def name(self):
"""Return the name of the entity."""
return self.config_name or self.host_name or DEVICE_DEFAULT_NAME
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def entity_picture(self):
"""Return the picture of the device."""
return self.config_picture
@property
def state_attributes(self):
"""Return the device state attributes."""
attr = {}
if self.gps:
attr[ATTR_LATITUDE] = self.gps[0]
attr[ATTR_LONGITUDE] = self.gps[1]
attr[ATTR_GPS_ACCURACY] = self.gps_accuracy
if self.battery:
attr[ATTR_BATTERY] = self.battery
if self.attributes:
for key, value in self.attributes.items():
attr[key] = value
return attr
@property
def hidden(self):
"""If device should be hidden."""
return self.away_hide and self.state != STATE_HOME
def seen(self, host_name: str=None, location_name: str=None,
gps: GPSType=None, gps_accuracy=0, battery: str=None,
attributes: dict=None):
"""Mark the device as seen."""
self.last_seen = dt_util.utcnow()
self.host_name = host_name
self.location_name = location_name
self.gps_accuracy = gps_accuracy or 0
self.battery = battery
self.attributes = attributes
self.gps = None
if gps is not None:
try:
self.gps = float(gps[0]), float(gps[1])
except (ValueError, TypeError, IndexError):
_LOGGER.warning('Could not parse gps value for %s: %s',
self.dev_id, gps)
self.update()
def stale(self, now: dt_util.dt.datetime=None):
"""Return if device state is stale."""
return self.last_seen and \
(now or dt_util.utcnow()) - self.last_seen > self.consider_home
def update(self):
"""Update state of entity."""
if not self.last_seen:
return
elif self.location_name:
self._state = self.location_name
elif self.gps is not None:
zone_state = zone.active_zone(self.hass, self.gps[0], self.gps[1],
self.gps_accuracy)
if zone_state is None:
self._state = STATE_NOT_HOME
elif zone_state.entity_id == zone.ENTITY_ID_HOME:
self._state = STATE_HOME
else:
self._state = zone_state.name
elif self.stale():
self._state = STATE_NOT_HOME
self.last_update_home = False
else:
self._state = STATE_HOME
self.last_update_home = True
def load_config(path: str, hass: HomeAssistantType, consider_home: timedelta):
"""Load devices from YAML configuration file."""
dev_schema = vol.Schema({
vol.Required('name'): cv.string,
vol.Optional('track', default=False): cv.boolean,
vol.Optional('mac', default=None): vol.Any(None, vol.All(cv.string,
vol.Upper)),
vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
vol.Optional('gravatar', default=None): vol.Any(None, cv.string),
vol.Optional('picture', default=None): vol.Any(None, cv.string),
vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All(
cv.time_period, cv.positive_timedelta)
})
try:
result = []
try:
devices = load_yaml_config_file(path)
except HomeAssistantError as err:
_LOGGER.error('Unable to load %s: %s', path, str(err))
return []
for dev_id, device in devices.items():
try:
device = dev_schema(device)
device['dev_id'] = cv.slugify(dev_id)
except vol.Invalid as exp:
log_exception(exp, dev_id, devices, hass)
else:
result.append(Device(hass, **device))
return result
except (HomeAssistantError, FileNotFoundError):
# When YAML file could not be loaded/did not contain a dict
return []
def setup_scanner_platform(hass: HomeAssistantType, config: ConfigType,
scanner: Any, see_device: Callable):
"""Helper method to connect scanner-based platform to device tracker."""
interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    # On the initial scan of each MAC we also look up the host name so it can
    # be stored in the config.
seen = set() # type: Any
def device_tracker_scan(now: dt_util.dt.datetime):
"""Called when interval matches."""
for mac in scanner.scan_devices():
if mac in seen:
host_name = None
else:
host_name = scanner.get_device_name(mac)
seen.add(mac)
see_device(mac=mac, host_name=host_name)
track_utc_time_change(hass, device_tracker_scan, second=range(0, 60,
interval))
device_tracker_scan(None)
def update_config(path: str, dev_id: str, device: Device):
"""Add device to YAML configuration file."""
with open(path, 'a') as out:
out.write('\n')
device = {device.dev_id: {
'name': device.name,
'mac': device.mac,
'picture': device.config_picture,
'track': device.track,
CONF_AWAY_HIDE: device.away_hide
}}
yaml.dump(device, out, default_flow_style=False)
def get_gravatar_for_email(email: str):
"""Return an 80px Gravatar for the given email address."""
import hashlib
url = 'https://www.gravatar.com/avatar/{}.jpg?s=80&d=wavatar'
return url.format(hashlib.md5(email.encode('utf-8').lower()).hexdigest())
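# Illustrative sketch, not part of the original component: a `see` service call
# and the gravatar helper in practice. `hass` is assumed to be a running Home
# Assistant instance; the device id, coordinates and email address are made up.
def _example_usage(hass):
    """Report a GPS sighting for a device and build its picture URL (sketch)."""
    see(hass, dev_id='demo_phone', host_name='Demo Phone',
        gps=(52.3731, 4.8922), gps_accuracy=20)
    return get_gravatar_for_email('[email protected]')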
|
|
#!/usr/bin/env python3 -tt
"""
File: gobjects.py
-------------------------
This example program illustrates the use of the Python graphics library.
"""
from spgl.graphics.gwindow import *
from spgl.gtypes import *
from spgl.graphics.gobjects import *
from spgl.gsounds import *
from spgl.filelib import *
CONTINUE = ""
input("Press enter to start graphics program")
window = GWindow()
input(CONTINUE)
rect = GRect(45,54,10,10)
window.add(rect)
input(CONTINUE)
rect.setFilled(True)
input(CONTINUE)
rect.move(100,100)
input(CONTINUE)
rect.setLocation(x=0,y=0)
input(CONTINUE)
rect.setColor(color = "RED")
input(CONTINUE)
rect.setColor(rgb = 0x00ff00)
input(CONTINUE)
rect.setVisible(False)
input(CONTINUE)
rect.setVisible(True)
input(CONTINUE)
window.repaint()
input(CONTINUE)
window.clear()
input(CONTINUE)
window.add(rect)
rect2 = GRect(10, 10)
gcomp = GCompound()
gcomp.add(rect)
gcomp.add(rect2)
window.add(gcomp, 50, 50)
input(CONTINUE)
gcomp.remove(rect2)
input(CONTINUE)
gcomp.add(rect2)
input(CONTINUE)
gcomp.sendForward(rect)
input(CONTINUE)
gcomp.sendBackward(rect)
input(CONTINUE)
gcomp.sendToFront(rect)
input(CONTINUE)
gcomp.sendToBack(rect)
input(CONTINUE)
gcomp.removeAll()
input(CONTINUE)
round = GRoundRect(30, 30, 50, 50)
window.add(round)
input(CONTINUE)
window.remove(round)
rect3d = G3DRect(30, 30, 50, 50, True)
rect3d.setFilled(True)
window.add(rect3d)
input(CONTINUE)
rect3d.setRaised(False)
input(CONTINUE)
window.remove(rect3d)
oval = GOval(20, 40, 50, 50)
window.add(oval)
input(CONTINUE)
oval.setSize(40, 20)
input(CONTINUE)
oval.setFilled(True)
oval.setFillColor(rgb = 0x30c290)
input(CONTINUE)
window.remove(oval)
arc = GArc(50, 20, 30, 225, 50, 50)
window.add(arc)
input(CONTINUE)
arc.setStartAngle(0)
input(CONTINUE)
arc.setSweepAngle(90)
input(CONTINUE)
arc.setFilled(True)
arc.setFillColor(rgb = 0xffff00)
input(CONTINUE)
window.remove(arc)
line = GLine(0,0,50,50)
window.add(line)
input(CONTINUE)
line.setStartPoint(150,150)
input(CONTINUE)
line.setEndPoint(0,0)
input(CONTINUE)
window.remove(line)
image = GImage("../res/images/python.jpg")
window.add(image)
input(CONTINUE)
image.scale(sf = 3)
input(CONTINUE)
window.remove(image)
label = GLabel("This is the Python client with a quote \\\"", 50, 50)
window.add(label)
input(CONTINUE)
label.setFont("Arial-30")
input(CONTINUE)
label.setLabel("Changing the label now")
input(CONTINUE)
window.remove(label)
poly = GPolygon()
poly.addVertex(50, 50)
poly.addVertex(75, 25)
poly.addEdge(-20,100)
poly.addPolarEdge(15, 190)
window.add(poly)
input(CONTINUE)
poly.setFilled(True)
poly.setFillColor(rgb = 0xa01090)
input(CONTINUE)
window.remove(poly)
input(CONTINUE)
s = Sound("../res/sounds/fireball.wav")
input(CONTINUE)
s.play()
input(CONTINUE)
file = open_file_dialog(title = "Open", mode = "load", path=__file__)
input(CONTINUE)
window.remove(rect)
input(CONTINUE)
window.requestFocus()
input("Completed, press enter to close program")
window.close()
|
|
import sqlite3
import time
from twisted.trial.unittest import TestCase
from twisted.python.util import sibpath
from twisted.internet import defer
from frack.db import (TicketStore, UnauthorizedError, NotFoundError,
AuthStore, Collision)
from norm.sqlite import SqliteTranslator
from norm.common import BlockingRunner
from norm.operation import SQL
class TicketStoreTest(TestCase):
def populatedStore(self):
db = sqlite3.connect(":memory:")
db.executescript(open(sibpath(__file__, "trac_test.sql")).read())
translator = SqliteTranslator()
runner = BlockingRunner(db, translator)
store = TicketStore(runner, user='foo')
return store
@defer.inlineCallbacks
def test_createTicket_minimal(self):
"""
You can create tickets
"""
store = self.populatedStore()
# a minimal ticket
data = {
'summary': 'the summary',
}
ticket_id = yield store.createTicket(data)
self.assertNotEqual(ticket_id, None,
"Should return the new id: %s" % (ticket_id,))
ticket = yield store.fetchTicket(ticket_id)
# Assert for each of the fields:
# http://trac.edgewall.org/wiki/TracDev/DatabaseSchema/TicketSystem#Tableticket
# XXX should these be '' instead of None?
self.assertEqual(ticket['id'], ticket_id)
self.assertEqual(ticket['type'], None)
self.assertTrue(ticket['time'])
self.assertTrue(ticket['changetime'])
self.assertEqual(ticket['time'], ticket['changetime'])
self.assertEqual(ticket['component'], None)
self.assertEqual(ticket['severity'], None)
self.assertEqual(ticket['priority'], None)
self.assertEqual(ticket['owner'], None)
self.assertEqual(ticket['reporter'], 'foo', "Should use the Store's "
"user as the report")
self.assertEqual(ticket['cc'], None)
self.assertEqual(ticket['version'], None)
self.assertEqual(ticket['milestone'], None)
self.assertEqual(ticket['status'], 'new')
self.assertEqual(ticket['resolution'], None)
self.assertEqual(ticket['summary'], 'the summary')
self.assertEqual(ticket['description'], None)
self.assertEqual(ticket['keywords'], None)
self.assertEqual(ticket['attachments'], [])
def test_createTicket_status(self):
"""
You can't override the status of a ticket while creating it.
"""
store = self.populatedStore()
self.assertFailure(store.createTicket({
'summary': 'something',
'status': 'something',
}), Exception)
@defer.inlineCallbacks
def test_createTicket_maximal(self):
"""
You can create a ticket with all kinds of options.
"""
store = self.populatedStore()
# Do I need to enforce that all the values are valid options in their
# specific join tables?
data = {
# do I need to enforce that this is in enum type=ticket_type?
'type': 'type',
'component': 'component',
'severity': 'severity',
'priority': 'priority',
'owner': 'owner',
'cc': 'cc',
'version': 'version',
'milestone': 'milestone',
'resolution': 'resolution',
'summary': 'summary',
'description': 'description',
'keywords': 'keywords',
}
ticket_id = yield store.createTicket(data)
ticket = yield store.fetchTicket(ticket_id)
self.assertEqual(ticket['id'], ticket_id)
self.assertEqual(ticket['type'], 'type')
self.assertTrue(ticket['time'])
self.assertTrue(ticket['changetime'])
self.assertEqual(ticket['time'], ticket['changetime'])
self.assertEqual(ticket['component'], 'component')
self.assertEqual(ticket['severity'], 'severity')
self.assertEqual(ticket['priority'], 'priority')
self.assertEqual(ticket['owner'], 'owner')
self.assertEqual(ticket['reporter'], 'foo', "Should use the Store's "
"user as the report")
self.assertEqual(ticket['cc'], 'cc')
self.assertEqual(ticket['version'], 'version')
self.assertEqual(ticket['milestone'], 'milestone')
self.assertEqual(ticket['status'], 'new')
self.assertEqual(ticket['resolution'], 'resolution')
self.assertEqual(ticket['summary'], 'summary')
self.assertEqual(ticket['description'], 'description')
self.assertEqual(ticket['keywords'], 'keywords')
@defer.inlineCallbacks
def test_createTicket_customFields(self):
"""
You can create a ticket with custom fields
"""
store = self.populatedStore()
data = {
'branch': 'foo',
'summary': 'something',
'launchpad_bug': '1234',
}
ticket_id = yield store.createTicket(data)
ticket = yield store.fetchTicket(ticket_id)
self.assertEqual(ticket['branch'], 'foo')
self.assertEqual(ticket['summary'], 'something')
self.assertEqual(ticket['launchpad_bug'], '1234')
@defer.inlineCallbacks
def test_createTicket_customFields_fail(self):
"""
If the custom fields can't be created, the whole transaction should be
rolled back and the ticket should not be added.
"""
store = self.populatedStore()
count = yield store.runner.run(SQL('select count(*) from ticket'))
bad_data = {
'summary': 'good summary',
'branch': object(),
}
try:
yield store.createTicket(bad_data)
except:
pass
else:
self.fail("Should have raised an exception")
after_count = yield store.runner.run(SQL('select count(*) from ticket'))
self.assertEqual(count, after_count, "Should NOT have created a ticket")
def test_createTicket_noauth(self):
"""
Unauthenticated users can't create tickets
"""
store = self.populatedStore()
store.user = None
self.assertFailure(store.createTicket({
'summary': 'good summary',
}), UnauthorizedError)
@defer.inlineCallbacks
def test_fetchTicket(self):
"""
You can fetch existing ticket information
"""
store = self.populatedStore()
ticket = yield store.fetchTicket(5622)
# look in test/trac_test.sql to see the values
self.assertEqual(ticket['id'], 5622)
self.assertEqual(ticket['type'], 'enhancement')
self.assertEqual(ticket['time'], 1333844383)
self.assertEqual(ticket['changetime'], 1334260992)
self.assertEqual(ticket['component'], 'core')
self.assertEqual(ticket['severity'], None)
self.assertEqual(ticket['priority'], 'normal')
self.assertEqual(ticket['owner'], '')
self.assertEqual(ticket['reporter'], 'exarkun')
self.assertEqual(ticket['cc'], '')
self.assertEqual(ticket['version'], None)
self.assertEqual(ticket['milestone'], '')
self.assertEqual(ticket['status'], 'closed')
self.assertEqual(ticket['resolution'], 'duplicate')
# ignore summary and description because they're long
self.assertEqual(ticket['keywords'], 'tests')
# custom fields
self.assertEqual(ticket['branch'], 'branches/tcp-endpoints-tests-refactor-5622')
self.assertEqual(ticket['branch_author'], 'exarkun')
self.assertEqual(ticket['launchpad_bug'], '')
# comments
self.assertEqual(len(ticket['comments']), 4)
# attachments
self.assertEqual(len(ticket['attachments']), 0)
@defer.inlineCallbacks
def test_fetchTicket_attachments(self):
"""
Attachment metadata should be included when fetching a ticket.
"""
store = self.populatedStore()
ticket = yield store.fetchTicket(5517)
self.assertEqual(ticket['attachments'], [
{
'filename': '5517.diff',
'size': 3472,
'time': 1331531954,
'description': '',
'author': 'candre717',
'ip': '66.35.39.65',
# for compatibility?
'ipnr': '66.35.39.65',
}
])
def test_dne(self):
"""
Should fail appropriately if the ticket doesn't exist.
"""
store = self.populatedStore()
self.assertFailure(store.fetchTicket(1), NotFoundError)
self.assertFailure(store.updateTicket(1, {}), NotFoundError)
@defer.inlineCallbacks
def test_fetchComments(self):
"""
You can get all the comments for a ticket.
"""
store = self.populatedStore()
comments = yield store.fetchComments(5622)
# look in test/trac_test.sql to see where these assertions come from
self.assertEqual(len(comments), 4, "There are 4 comments")
c = comments[0]
self.assertEqual(c['ticket'], 5622)
self.assertEqual(c['time'], 1333844456)
self.assertEqual(c['author'], 'exarkun')
self.assertEqual(c['number'], '1')
self.assertEqual(c['comment'], "(In [34131]) Branching to 'tcp-endpoints-tests-refactor-5622'")
self.assertEqual(len(c['changes']), 2)
self.assertEqual(c['changes']['branch'], ('', 'branches/tcp-endpoints-tests-refactor-5622'))
self.assertEqual(c['changes']['branch_author'], ('', 'exarkun'))
@defer.inlineCallbacks
def test_fetchComments_reply(self):
"""
The comments should know that they are a reply to another comment
"""
store = self.populatedStore()
comments = yield store.fetchComments(2723)
# look in test/trac_test.sql to see where these assertions come from
comment13 = comments[12]
self.assertEqual(comment13['replyto'], '12')
self.assertEqual(comment13['number'], '13')
@defer.inlineCallbacks
def test_fetchComments_all(self):
"""
All comments should have a comments item, even if it's blank.
"""
store = self.populatedStore()
comments = yield store.fetchComments(4712)
for i,c in enumerate(comments):
self.assertTrue('comment' in c, c)
self.assertEqual(c['number'], str(i+1))
@defer.inlineCallbacks
def test_updateTicket(self):
"""
You can update attributes of a ticket while making a comment
"""
store = self.populatedStore()
data = {
'type': 'type',
'component': 'component',
'severity': 'severity',
'priority': 'priority',
'owner': 'owner',
'cc': 'cc',
'version': 'version',
'milestone': 'milestone',
'status': 'status',
'resolution': 'resolution',
'summary': 'summary',
'description': 'description',
'keywords': 'keywords',
'branch': 'foo',
'launchpad_bug': '1234',
}
comment = 'this is my new comment'
yield store.updateTicket(5622, data, comment)
ticket = yield store.fetchTicket(5622)
for k, v in data.items():
self.assertEqual(ticket[k], v,
"Expected ticket[%r] to be %r, not %r" % (k, v, ticket[k]))
self.assertEqual(ticket['comments'][-1]['comment'],
'this is my new comment', "Should add a comment")
self.assertEqual(ticket['comments'][-1]['number'], '5')
self.assertEqual(ticket['comments'][-1]['author'], 'foo')
self.assertEqual(ticket['comments'][-1]['ticket'], 5622)
self.assertEqual(ticket['comments'][-1]['time'], ticket['changetime'])
self.assertEqual(ticket['comments'][-1]['replyto'], '')
self.assertEqual(ticket['comments'][-1]['followups'], [])
# every change should be recorded, too
changes = ticket['comments'][-1]['changes']
# these magical values come from trac_test.sql
expected_changes = [
('type', 'enhancement', 'type'),
('component', 'core', 'component'),
('severity', None, 'severity'),
('priority', 'normal', 'priority'),
('owner', '', 'owner'),
# reporter
('cc', '', 'cc'),
('version', None, 'version'),
('milestone', '', 'milestone'),
('status', 'closed', 'status'),
('resolution', 'duplicate', 'resolution'),
# summary and description tested separately
('branch', 'branches/tcp-endpoints-tests-refactor-5622',
'foo'),
('launchpad_bug', '', '1234'),
]
for field, old, new in expected_changes:
expected = (old, new)
actual = changes[field]
self.assertEqual(actual, expected, "Expected %r change to"
" be %r, not %r" % (field, expected, actual))
        # summary and description are long and obnoxious to duplicate in the code
self.assertEqual(changes['summary'][1], 'summary')
self.assertEqual(changes['description'][1], 'description')
def test_updateTicket_noauth(self):
"""
If you are not authenticated, you can't update tickets
"""
store = self.populatedStore()
store.user = None
self.assertFailure(store.updateTicket(5622, {}),
UnauthorizedError)
@defer.inlineCallbacks
def test_updateTicket_noComment(self):
"""
If there's no comment, that's okay.
"""
store = self.populatedStore()
yield store.updateTicket(5622, dict(type='type'))
ticket = yield store.fetchTicket(5622)
self.assertEqual(ticket['comments'][-1]['comment'], '')
self.assertEqual(ticket['comments'][-1]['changes']['type'],
('enhancement', 'type'))
@defer.inlineCallbacks
def test_updateTicket_reply(self):
"""
You can signal that a comment is in reply to another comment
"""
store = self.populatedStore()
yield store.updateTicket(5622, {}, comment='something', replyto=1)
ticket = yield store.fetchTicket(5622)
comment = ticket['comments'][-1]
self.assertEqual(comment['comment'], 'something')
self.assertEqual(comment['replyto'], '1')
original = ticket['comments'][0]
self.assertEqual(original['followups'], ['5'], "Should know which "
"comments are followups to it")
@defer.inlineCallbacks
def test_updateTicket_onlyLogChanges(self):
"""
Only fields that have actually changed should be logged
"""
store = self.populatedStore()
data = {
'type': 'enhancement',
'component': 'new component',
}
yield store.updateTicket(5622, data)
ticket = yield store.fetchTicket(5622)
changes = ticket['comments'][-1]['changes']
self.assertEqual(changes['component'], ('core', 'new component'))
self.assertEqual(len(changes), 1, "Should only log the component")
@defer.inlineCallbacks
def test_fetchComponents(self):
"""
Should get all the values in the component table.
"""
store = self.populatedStore()
components = yield store.fetchComponents()
self.assertEqual(components, [
{'name': 'conch', 'owner': '', 'description': ''},
{'name': 'core', 'owner': '', 'description': ''},
{'name': 'ftp', 'owner': '', 'description': ''},
])
@defer.inlineCallbacks
def test_fetchMilestones(self):
"""
Should get all the milestones available.
"""
store = self.populatedStore()
milestones = yield store.fetchMilestones()
self.assertEqual(len(milestones), 4)
self.assertIn({
'name': 'not done, not due',
'due': None,
'completed': None,
'description': 'description',
}, milestones)
@defer.inlineCallbacks
def test_fetchEnum(self):
"""
Should get all the enums in the db.
"""
store = self.populatedStore()
priorities = yield store.fetchEnum('priority')
self.assertEqual(priorities, [
{'name': 'drop everything', 'value': ''},
{'name': 'normal', 'value': ''},
])
@defer.inlineCallbacks
def test_userList(self):
"""
Should get all the users in the db.
"""
store = self.populatedStore()
users = yield store.userList()
self.assertEqual(list(users), ['alice'])
@defer.inlineCallbacks
def test_addAttachmentMetadata(self):
"""
You can add attachment metadata to a ticket.
"""
store = self.populatedStore()
now = int(time.time())
yield store.addAttachmentMetadata(5622, {
'filename': 'the file',
'size': 1234,
'description': 'this is a description',
'ip': '127.0.0.1',
})
ticket = yield store.fetchTicket(5622)
self.assertEqual(len(ticket['attachments']), 1)
att = ticket['attachments'][0]
self.assertEqual(att['filename'], 'the file')
self.assertEqual(att['size'], 1234)
self.assertEqual(att['description'], 'this is a description')
self.assertEqual(att['ip'], '127.0.0.1')
self.assertEqual(att['ipnr'], '127.0.0.1')
self.assertTrue(att['time'] >= now)
self.assertEqual(att['author'], 'foo')
def test_addAttachmentMetadata_noauth(self):
"""
If you are not authenticated, you can't upload.
"""
store = self.populatedStore()
store.user = None
self.assertFailure(store.addAttachmentMetadata(5622, {}),
UnauthorizedError)
class AuthStoreTest(TestCase):
"""
Tests for database-backed authentication.
"""
def populatedStore(self):
"""
Return an L{AuthStore} with some expected user data in it.
"""
db = sqlite3.connect(":memory:")
db.executescript(open(sibpath(__file__, "trac_test.sql")).read())
translator = SqliteTranslator()
runner = BlockingRunner(db, translator)
store = AuthStore(runner)
return store
@defer.inlineCallbacks
def test_usernameFromEmail(self):
"""
usernameFromEmail should translate an email address to a username if
possible, otherwise, it should raise an exception.
"""
store = self.populatedStore()
username = yield store.usernameFromEmail('[email protected]')
self.assertEqual(username, 'alice')
self.assertFailure(store.usernameFromEmail('[email protected]'),
NotFoundError)
@defer.inlineCallbacks
def test_createUser(self):
"""
createUser will create a user associated with an email address
"""
store = self.populatedStore()
username = yield store.createUser('[email protected]', 'joe')
self.assertEqual(username, 'joe', "Should return the username")
username = yield store.usernameFromEmail('[email protected]')
self.assertEqual(username, 'joe')
@defer.inlineCallbacks
def test_createUser_justEmail(self):
"""
You can provide just the email address
"""
store = self.populatedStore()
username = yield store.createUser('[email protected]')
self.assertEqual(username, '[email protected]')
username = yield store.usernameFromEmail('[email protected]')
self.assertEqual(username, '[email protected]')
def test_createUser_alreadyExists(self):
"""
When trying to create a user with the same username as another user,
an error is returned. Also, if the email address is being used, it's
an error too.
"""
store = self.populatedStore()
self.assertFailure(store.createUser('[email protected]', 'alice'),
Collision)
# email associated with more than one user is not allowed
self.assertFailure(store.createUser('[email protected]', 'bob'),
Collision)
@defer.inlineCallbacks
def test_cookieFromUsername(self):
"""
Should get or create an auth_cookie entry.
"""
store = self.populatedStore()
cookie_value = yield store.cookieFromUsername('alice')
# this magical value is found in test/trac_test.sql
self.assertEqual(cookie_value, "a331422278bd676f3809e7a9d8600647",
"Should match the existing cookie value")
username = yield store.createUser('[email protected]')
cookie_value = yield store.cookieFromUsername(username)
self.assertNotEqual(cookie_value, None)
value2 = yield store.cookieFromUsername(username)
self.assertEqual(cookie_value, value2)
@defer.inlineCallbacks
def test_usernameFromCookie(self):
"""
Should return the username associated with a cookie value.
"""
store = self.populatedStore()
alice_cookie = "a331422278bd676f3809e7a9d8600647"
username = yield store.usernameFromCookie(alice_cookie)
self.assertEqual(username, 'alice')
self.assertFailure(store.usernameFromCookie('dne'), NotFoundError)
|
|
#
# o o
# 8
# .oPYo. .oPYo. odYo. o8P o8 .oPYo. odYo. .oPYo. .oPYo.
# Yb.. 8oooo8 8' `8 8 8 8oooo8 8' `8 8 ' 8oooo8
# 'Yb. 8. 8 8 8 8 8. 8 8 8 . 8.
# `YooP' `Yooo' 8 8 8 8 `Yooo' 8 8 `YooP' `Yooo'
# :.....::.....:..::..::..::..:.....:..::..:.....::.....:
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# Copyright Yazan Obeidi, 2017
#
# kafka.python.client - Kafka wrapper for Python -- DEPRECATED
#
from pykafka import KafkaClient
from pykafka.utils.compat import get_bytes
from json import dumps, loads
from uuid import uuid4
from random import getrandbits
import queue
__author__ = 'yazan'
__version__ = '0.0.1'
__licence__ = 'Apache V2'
class KafkaBase(object):
"""A high level convenience wrapper for Apache Kafka using PyKafka.
Goals of this interface are to provide a fault-tolerant and scalable way to
interact with the message queue.
Use the inherited class for faster runtime at the expense of greater memory.
"""
def __init__(self, config, log):
"""Establish connection based on config file"""
self.zkpr = config.get('zookeeper', 'gateway')
host = config.get('kafka', 'gateway')
log.debug("Initializing kafka client ({})".format(host))
self.client = KafkaClient(hosts=host)
self.config = config
self.log = log
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
@staticmethod
def make_request(function, _id, kwargs):
return dumps({'function': function, 'kwargs': kwargs,'id': _id})
def request(self, function, kwargs={}):
"""Send KafkaManager request to kafka host e.g. list topics.
Post request to <kafka-manager-in> with randomly created ID.
See docker-kafka/python/KafkaManager.py for more information.
"""
# Generate the request
_id = str(uuid4())
request = self.make_request(function=function, _id=_id, kwargs=kwargs)
self.log.debug('POST {} to KafkaManager'.format(request))
# Post to KafkaManager input topic
self.put(request, 'kafka-manager-in')
        # Process output with a simple consumer so all msgs are received by all consumers
consumer = self.get_topic('kafka-manager-out').get_simple_consumer()
for i, message in enumerate(consumer):
if message is not None:
msg = loads(message.value)
if _id == msg['id']:
self.log.debug('Retrieved response: {} ({})'.format(msg, i))
return msg['output']
def get_topic(self, topic):
"""Returns handle to pykafka topic instance"""
return self.client.topics[get_bytes(topic)]
def list_topics(self):
"""Returns list of available topics."""
return self.request('list_topics', {})
def is_topic(self, topic):
"""Check if topic exists"""
return self.request('is_topic', {'topic': str(topic)})
def make_topic(self, topic, partitions=3, replication=1):
"""Create a topic if does not exist"""
kwargs = {'topic': topic,
'partitions': partitions,
'replication': replication}
return self.request('make_topic', kwargs)
def delete_topic(self, topic):
"""Delete single topic by name"""
return self.request('delete_topic', {'topic': str(topic)})
def purge_topics(self):
"""Delete all topics."""
for topic in self.list_topics():
if not topic.startswith(b'__'): # leave internal topics alone
self.delete_topic(topic)
def get(self,
topic,
consumer_group=None):
"""Fetches single message or optionally generates indefinite messages.
continuous : If True function acts like a generator.
"""
if not consumer_group:
consumer_group = self.config.get('kafka', 'default_consumer_group')
balanced_consumer = self.get_topic(topic).get_balanced_consumer(
consumer_group=get_bytes(consumer_group),
auto_commit_enable=True,
zookeeper_connect=self.zkpr)
for message in balanced_consumer:
if message is not None:
self.log.debug("Found msg @ offset {}".format(message.offset))
msg = message.value
return msg
else:
self.log.warning("Found None msg in topic: {}".format(topic))
balanced_consumer.stop()
def put(self, msg, topic, pk=None):
"""Async producer."""
topic = self.get_topic(topic)
with topic.get_producer(linger_ms=1) as producer:
pk = pk if pk else getrandbits(100) # any unique partition key works
producer.produce(get_bytes(msg), partition_key=b'%i' % pk)
class Kafka(KafkaBase):
"""This class caches producer and consumer objects allowing for greater speed
than the base class, which initializes a new producer and consumer for each
    message sent / received.
TODO: eventually move to its own file, for now leave here
"""
def __init__(self, config, log, producer=None, consumer=None, **kwargs):
log.info("Starting Kafka")
self.log = log
self.config = config
self.kwargs = kwargs
self.prod_name = None if producer is None else producer
self.producer = None # set by user or context manager
self.cons_name = None if consumer is None else consumer
self.consumer = None # set by user or context manager
self.cons_pk = 0
self.prod_pk = 0
KafkaBase.__init__(self, config, log)
def __enter__(self):
if self.prod_name:
self.start_producer(self.prod_name, **self.kwargs)
if self.cons_name:
self.start_consumer(self.cons_name, **self.kwargs)
return self
def __exit__(self, type, value, traceback):
if self.producer:
self.stop_producer()
if self.consumer:
self.stop_consumer()
def start_producer(self, producer=None, linger_ms=1, **kwargs):
"""Subscribes producer to given topic with supplied kwargs
If producer is None will use self.producer"""
self.log.debug("Initializing producer {}".format(producer))
self.producer = self.get_topic(producer).get_producer(linger_ms=linger_ms,
**kwargs)
self.producer.start()
        self.prod_pk = 0  # initialize the producer partition key to zero
def start_consumer(self, consumer, consumer_group=None, **kwargs):
"""Subscribes consumer to given topic and consumer group with supplied
kwargs
"""
if not consumer_group:
consumer_group = self.config.get('kafka', 'default_consumer_group')
self.log.debug("Initializing consumer {} in consumer group <{}>".format(
consumer, consumer_group))
self.consumer = self.get_topic(consumer)
self.consumer = self.consumer.get_balanced_consumer(
consumer_group=get_bytes(consumer_group),
auto_commit_enable=True,
zookeeper_connect=self.zkpr,
**kwargs)
self.consumer.start()
        self.cons_pk = 0
def stop_consumer(self):
self.log.debug("stopping consumer {}".format(self.cons_name))
self.consumer.stop()
def stop_producer(self):
self.log.debug("stopping producer {}".format(self.prod_name))
self.producer.stop()
def sub_producer(self, producer, **kwargs):
"""Change Producer subscription topic"""
self.log.debug("Subscribing producer: "\
"{} to {}".format(self.producer, producer))
self.producer = self.get_topic(producer).get_producer(**kwargs)
self.prod_name = producer
def sub_consumer(self, consumer, **kwargs):
"""Change Consumer subscription topic"""
self.log.debug("Subscribing consumer: "\
"{} to {}".format(self.consumer, consumer))
self.consumer = self.get_topic(consumer).get_consumer(**kwargs)
self.cons_name = consumer
def put(self, message, topic=None):
"""
        If topic is given and differs from the current producer topic, the
        producer is re-subscribed to that topic before the message is posted.
"""
# If topic is not given use currently subscribed producer
# and if topic does not match assigned producer, subscribe to it
if topic is not None and topic != self.prod_name:
            self.sub_producer(topic, **self.kwargs)
#self.log.debug("kafka PUT {} -> {}".format(message, self.prod_name))
self.producer.produce(
get_bytes(message), partition_key=b'%i' % self.prod_pk)
# increment partition key
self.prod_pk += 1
def messages(self, meta=False, topic=None):
"""Generator object to yield continuous stream of messages.
If meta is false, only message.value is passed.
"""
# If topic is not given use currently subscribed consumer
# and if topic does not match assigned consumer, subscribe to it
if topic is not None and topic != self.cons_name:
            self.sub_consumer(topic, **self.kwargs)
self.log.debug("kafka GET {}".format(self.cons_name))
for message in self.consumer:
if message is not None:
self.log.debug("Found msg @ topic {} offset {}".format(
topic, message.offset))
                # track the offset of the last consumed message
                self.cons_pk = message.offset
msg = message if meta else message.value
                yield msg
else:
self.log.warning("Found None msg in topic: {}".format(topic))
def get(self, meta=False, topic=None):
"""Return just one message from messages()"""
        return next(self.messages(meta=meta, topic=topic))
if __name__ == "__main__":
pass
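# Hedged usage sketch (comments only; the `config` and `log` objects and the
# topic name are illustrative assumptions, not part of this module). The
# cached producer/consumer pattern provided by Kafka looks like:
#
#     config = ...   # configparser-style object with a [kafka] section
#     log = ...      # standard logging.Logger instance
#
#     with Kafka(config, log, producer='demo_topic', consumer='demo_topic') as k:
#         k.put('hello world')   # reuses the cached producer
#         print(k.get())         # reuses the cached balanced consumer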
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding, Linear
import paddle.fluid.framework as framework
from paddle.fluid.optimizer import Adam
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
from test_imperative_base import new_program_scope
import numpy as np
import six
import paddle
from paddle.fluid.framework import _test_eager_guard
class SimpleLSTMRNN(fluid.Layer):
def __init__(self,
hidden_size,
num_steps,
num_layers=2,
init_scale=0.1,
dropout=None):
super(SimpleLSTMRNN, self).__init__()
self._hidden_size = hidden_size
self._num_layers = num_layers
self._init_scale = init_scale
self._dropout = dropout
self._input = None
self._num_steps = num_steps
self.cell_array = []
self.hidden_array = []
self.weight_1_arr = []
self.weight_2_arr = []
self.bias_arr = []
self.mask_array = []
for i in range(self._num_layers):
weight_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 2, self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale))
self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
bias_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.Constant(0.0))
self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
def forward(self, input_embedding, init_hidden=None, init_cell=None):
self.cell_array = []
self.hidden_array = []
for i in range(self._num_layers):
pre_hidden = fluid.layers.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1])
pre_cell = fluid.layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1])
pre_hidden = fluid.layers.reshape(
pre_hidden, shape=[-1, self._hidden_size])
pre_cell = fluid.layers.reshape(
pre_cell, shape=[-1, self._hidden_size])
self.hidden_array.append(pre_hidden)
self.cell_array.append(pre_cell)
res = []
for index in range(self._num_steps):
self._input = fluid.layers.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1])
self._input = fluid.layers.reshape(
self._input, shape=[-1, self._hidden_size])
for k in range(self._num_layers):
pre_hidden = self.hidden_array[k]
pre_cell = self.cell_array[k]
weight_1 = self.weight_1_arr[k]
bias = self.bias_arr[k]
nn = fluid.layers.concat([self._input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
gate_input = fluid.layers.elementwise_add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1)
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
i) * fluid.layers.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
self.hidden_array[k] = m
self.cell_array[k] = c
self._input = m
if self._dropout is not None and self._dropout > 0.0:
self._input = fluid.layers.dropout(
self._input,
dropout_prob=self._dropout,
dropout_implementation='upscale_in_train')
res.append(
fluid.layers.reshape(
self._input, shape=[1, -1, self._hidden_size]))
real_res = fluid.layers.concat(res, 0)
real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = fluid.layers.concat(self.hidden_array, 1)
last_hidden = fluid.layers.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size])
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(self.cell_array, 1)
last_cell = fluid.layers.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size])
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
return real_res, last_hidden, last_cell
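# Hedged NumPy reference (illustration only, not used by the tests): the fused
# single-weight-matrix LSTM step that SimpleLSTMRNN.forward computes above.
# `xh` stands for the concatenation of the step input and the previous hidden
# state, matching fluid.layers.concat([self._input, pre_hidden], 1).
def _reference_lstm_step(xh, c_prev, weight, bias):
    def sigmoid(t):
        return 1.0 / (1.0 + np.exp(-t))
    gates = np.matmul(xh, weight) + bias      # one matmul covers all four gates
    i, j, f, o = np.split(gates, 4, axis=-1)  # same order as layers.split above
    c = c_prev * sigmoid(f) + sigmoid(i) * np.tanh(j)
    h = np.tanh(c) * sigmoid(o)
    return h, c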
class PtbModel(fluid.Layer):
def __init__(self,
hidden_size,
vocab_size,
num_layers=2,
num_steps=20,
init_scale=0.1,
dropout=None):
super(PtbModel, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
self.simple_lstm_rnn = SimpleLSTMRNN(
hidden_size,
num_steps,
num_layers=num_layers,
init_scale=init_scale,
dropout=dropout)
self.embedding = Embedding(
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
init_c = fluid.layers.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size])
x_emb = self.embedding(input)
x_emb = fluid.layers.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size])
if self.dropout is not None and self.dropout > 0.0:
x_emb = fluid.layers.dropout(
x_emb,
                dropout_prob=self.dropout,
dropout_implementation='upscale_in_train')
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
init_c)
rnn_out = fluid.layers.reshape(
rnn_out, shape=[-1, self.num_steps, self.hidden_size])
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
projection = fluid.layers.reshape(
projection, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss)
return loss, last_hidden, last_cell
class TestDygraphPtbRnn(unittest.TestCase):
def func_setUp(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
bd = []
lr_arr = [1.0]
            # this is a fake lr decay strategy
for i in range(1, 10):
bd.append(100 * i)
new_lr = 1.0
lr_arr.append(new_lr)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr_arr),
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
if i == 0:
for param in ptb_model.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
if i == batch_num - 1:
for param in ptb_model.parameters():
dy_param_updated[param.name] = param.numpy()
# check optimizer
self.opti_dict = adam.state_dict()
self.base_opti = {}
for k, v in self.opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.base_opti[v.name] = v.numpy()
self.assertTrue(np.sum(np.abs(v.numpy())) != 0)
else:
self.base_opti[k] = v
fluid.save_dygraph(self.opti_dict, "./test_dy")
self.state_dict = ptb_model.state_dict()
self.model_base = {}
for k, v in self.state_dict.items():
np_t = v.numpy()
self.model_base[k] = np_t
fluid.save_dygraph(self.state_dict, "./test_dy")
def func_testLoadAndSetVarBase(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
bd = []
lr_arr = [1.0]
            # this is a fake lr decay strategy
for i in range(1, 10):
bd.append(100 * i)
new_lr = 1.0
lr_arr.append(new_lr)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr_arr),
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
if i == 0:
for param in ptb_model.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
if i == batch_num - 1:
for param in ptb_model.parameters():
dy_param_updated[param.name] = param.numpy()
# check optimizer
opti_dict = adam.state_dict()
# set to zero
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
np_t = v.numpy()
var = v.value().get_tensor()
var.set(np.zeros_like(np_t), place)
self.assertTrue(np.sum(np.abs(v.numpy())) == 0)
if isinstance(adam._learning_rate, LearningRateDecay):
adam._learning_rate.step_num = 0
para_state_dict, opti_state_dict = fluid.load_dygraph("./test_dy")
print(opti_state_dict.keys())
adam.set_state_dict(opti_state_dict)
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
else:
self.assertEqual(v, self.base_opti[k])
# check parameter
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
np_t = v.numpy()
var = v.value().get_tensor()
var.set(np.zeros_like(np_t), place)
ptb_model.set_state_dict(stat_dict=para_state_dict)
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
def func_testSetVariable(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
bd = []
lr_arr = [1.0]
            # this is a fake lr decay strategy
for i in range(1, 10):
bd.append(100 * i)
new_lr = 1.0
lr_arr.append(new_lr)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr_arr),
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
if i == 0:
for param in ptb_model.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
if i == batch_num - 1:
for param in ptb_model.parameters():
dy_param_updated[param.name] = param.numpy()
# check optimizer
opti_dict = adam.state_dict()
# set to zero
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
np_t = v.numpy()
var = v.value().get_tensor()
var.set(np.zeros_like(np_t), place)
self.assertTrue(np.sum(np.abs(v.numpy())) == 0)
if isinstance(adam._learning_rate, LearningRateDecay):
adam._learning_rate.step_num = 0
adam.set_state_dict(self.opti_dict)
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
else:
self.assertEqual(v, self.base_opti[k])
# check parameter
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
np_t = v.numpy()
var = v.value().get_tensor()
var.set(np.zeros_like(np_t), place)
ptb_model.set_state_dict(self.state_dict)
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
def func_testSetNumpy(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
bd = []
lr_arr = [1.0]
            # this is a fake lr decay strategy
for i in range(1, 10):
bd.append(100 * i)
new_lr = 1.0
lr_arr.append(new_lr)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr_arr),
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
if i == 0:
for param in ptb_model.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
if i == batch_num - 1:
for param in ptb_model.parameters():
dy_param_updated[param.name] = param.numpy()
# check optimizer
opti_dict = adam.state_dict()
np_opti_dict = {}
# set to zero
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
np_t = v.numpy()
np_opti_dict[v.name] = np_t
var = v.value().get_tensor()
var.set(np.zeros_like(np_t), place)
self.assertTrue(np.sum(np.abs(v.numpy())) == 0)
else:
np_opti_dict[k] = v
if isinstance(adam._learning_rate, LearningRateDecay):
adam._learning_rate.step_num = 0
adam.set_state_dict(np_opti_dict)
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
else:
self.assertEqual(v, self.base_opti[k])
# check parameter
state_dict = ptb_model.state_dict()
np_state_dict = {}
for k, v in state_dict.items():
np_t = v.numpy()
np_state_dict[k] = np_t
var = v.value().get_tensor()
var.set(np.zeros_like(np_t), place)
ptb_model.set_state_dict(np_state_dict)
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
def func_testSetVariableBeforeTrain(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=0.0,
beta1=0.8,
beta2=0.6,
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
adam.set_state_dict(self.opti_dict)
ptb_model.set_state_dict(self.state_dict)
for i in range(1):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] *
adam._beta1))
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] *
adam._beta2))
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
def func_testLoadAndSetVarBaseBeforeTrain(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
bd = []
lr_arr = [0.0]
            # this is a fake lr decay strategy
for i in range(1, 10):
bd.append(100 * i)
                # set lr to zero so parameters are not updated
new_lr = 0.0
lr_arr.append(new_lr)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=0.0,
beta1=0.8,
beta2=0.6,
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
state_dict, opti_dict = fluid.load_dygraph("./test_dy")
adam.set_state_dict(opti_dict)
ptb_model.set_state_dict(state_dict)
for i in range(1):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] *
adam._beta1))
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] *
adam._beta2))
# check parameter
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
def func_testSetNumpyBeforeTrain(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
bd = []
lr_arr = [0.0]
            # this is a fake lr decay strategy
for i in range(1, 10):
bd.append(100 * i)
                # set lr to 0.0 so parameters are not updated
new_lr = 0.0
lr_arr.append(new_lr)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr_arr),
beta1=0.8,
beta2=0.6,
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
np_opti_dict = {}
np_state_dict = {}
for k, v in self.opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
np_opti_dict[v.name] = v.numpy()
else:
np_opti_dict[k] = v
for k, v in self.state_dict.items():
np_state_dict[k] = v.numpy()
adam.set_state_dict(np_opti_dict)
ptb_model.set_state_dict(np_state_dict)
for i in range(1):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] *
adam._beta1))
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] *
adam._beta2))
# check parameter
state_dict = ptb_model.state_dict()
for k, v in state_dict.items():
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
def func_testOnlyLoadParams(self):
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding([10, 10])
state_dict = emb.state_dict()
fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))
para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy'))
            self.assertTrue(opti_state_dict is None)
para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy.pdparams'))
para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy.pdopt'))
def func_test_load_compatible_with_keep_name_table(self):
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding([10, 10])
state_dict = emb.state_dict()
fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))
para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy'), keep_name_table=True)
            self.assertTrue(para_state_dict is not None)
            self.assertTrue(opti_state_dict is None)
def test_main(self):
self.func_setUp()
self.func_testLoadAndSetVarBase()
self.func_testSetVariable()
self.func_testSetNumpy()
self.func_testSetVariableBeforeTrain()
self.func_testLoadAndSetVarBaseBeforeTrain()
self.func_testSetNumpyBeforeTrain()
self.func_testOnlyLoadParams()
self.func_test_load_compatible_with_keep_name_table()
with _test_eager_guard():
self.func_setUp()
self.func_testLoadAndSetVarBase()
self.func_testSetVariable()
self.func_testSetNumpy()
self.func_testSetVariableBeforeTrain()
self.func_testLoadAndSetVarBaseBeforeTrain()
self.func_testSetNumpyBeforeTrain()
self.func_testOnlyLoadParams()
self.func_test_load_compatible_with_keep_name_table()
if __name__ == '__main__':
unittest.main()
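# Hedged usage sketch (comments only; the checkpoint path and layer size are
# illustrative). The minimal save/load round trip exercised above is:
#
#     with fluid.dygraph.guard():
#         emb = fluid.dygraph.Embedding([10, 10])
#         fluid.save_dygraph(emb.state_dict(), "./emb_ckpt")  # writes emb_ckpt.pdparams
#         para_state_dict, opti_state_dict = fluid.load_dygraph("./emb_ckpt")
#         emb.set_state_dict(para_state_dict)  # opti_state_dict is None here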
|
|
import math
from ..libmp.backend import xrange
class QuadratureRule(object):
"""
Quadrature rules are implemented using this class, in order to
simplify the code and provide a common infrastructure
for tasks such as error estimation and node caching.
You can implement a custom quadrature rule by subclassing
:class:`QuadratureRule` and implementing the appropriate
methods. The subclass can then be used by :func:`~mpmath.quad` by
passing it as the *method* argument.
:class:`QuadratureRule` instances are supposed to be singletons.
:class:`QuadratureRule` therefore implements instance caching
in :func:`~mpmath.__new__`.
"""
def __init__(self, ctx):
self.ctx = ctx
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def clear(self):
"""
Delete cached node data.
"""
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def calc_nodes(self, degree, prec, verbose=False):
r"""
Compute nodes for the standard interval `[-1, 1]`. Subclasses
should probably implement only this method, and use
        the :func:`~mpmath.get_nodes` method to retrieve the nodes.
"""
raise NotImplementedError
def get_nodes(self, a, b, degree, prec, verbose=False):
"""
Return nodes for given interval, degree and precision. The
nodes are retrieved from a cache if already computed;
otherwise they are computed by calling :func:`~mpmath.calc_nodes`
and are then cached.
Subclasses should probably not implement this method,
but just implement :func:`~mpmath.calc_nodes` for the actual
node computation.
"""
key = (a, b, degree, prec)
if key in self.transformed_cache:
return self.transformed_cache[key]
orig = self.ctx.prec
try:
self.ctx.prec = prec+20
# Get nodes on standard interval
if (degree, prec) in self.standard_cache:
nodes = self.standard_cache[degree, prec]
else:
nodes = self.calc_nodes(degree, prec, verbose)
self.standard_cache[degree, prec] = nodes
# Transform to general interval
nodes = self.transform_nodes(nodes, a, b, verbose)
if key in self.interval_count:
self.transformed_cache[key] = nodes
else:
self.interval_count[key] = True
finally:
self.ctx.prec = orig
return nodes
def transform_nodes(self, nodes, a, b, verbose=False):
r"""
Rescale standardized nodes (for `[-1, 1]`) to a general
interval `[a, b]`. For a finite interval, a simple linear
change of variables is used. Otherwise, the following
transformations are used:
.. math ::
[a, \infty] : t = \frac{1}{x} + (a-1)
[-\infty, b] : t = (b+1) - \frac{1}{x}
[-\infty, \infty] : t = \frac{x}{\sqrt{1-x^2}}
"""
ctx = self.ctx
a = ctx.convert(a)
b = ctx.convert(b)
one = ctx.one
if (a, b) == (-one, one):
return nodes
half = ctx.mpf(0.5)
new_nodes = []
if ctx.isinf(a) or ctx.isinf(b):
if (a, b) == (ctx.ninf, ctx.inf):
p05 = -half
for x, w in nodes:
x2 = x*x
px1 = one-x2
spx1 = px1**p05
x = x*spx1
w *= spx1/px1
new_nodes.append((x, w))
elif a == ctx.ninf:
b1 = b+1
for x, w in nodes:
u = 2/(x+one)
x = b1-u
w *= half*u**2
new_nodes.append((x, w))
elif b == ctx.inf:
a1 = a-1
for x, w in nodes:
u = 2/(x+one)
x = a1+u
w *= half*u**2
new_nodes.append((x, w))
elif a == ctx.inf or b == ctx.ninf:
return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
else:
raise NotImplementedError
else:
# Simple linear change of variables
C = (b-a)/2
D = (b+a)/2
for x, w in nodes:
new_nodes.append((D+C*x, C*w))
return new_nodes
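    # Descriptive note on the Jacobians used above: with u = 2/(x+1), the
    # half-infinite substitutions t = a - 1 + u and t = b + 1 - u both give
    # |dt/dx| = 2/(x+1)**2 = u**2/2, hence the factor half*u**2 applied to
    # each weight; for the doubly infinite map t = x/sqrt(1-x**2) the
    # Jacobian is (1-x**2)**(-3/2), i.e. spx1/px1 in the code.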
def guess_degree(self, prec):
"""
Given a desired precision `p` in bits, estimate the degree `m`
of the quadrature required to accomplish full accuracy for
typical integrals. By default, :func:`~mpmath.quad` will perform up
to `m` iterations. The value of `m` should be a slight
overestimate, so that "slightly bad" integrals can be dealt
with automatically using a few extra iterations. On the
other hand, it should not be too big, so :func:`~mpmath.quad` can
quit within a reasonable amount of time when it is given
an "unsolvable" integral.
The default formula used by :func:`~mpmath.guess_degree` is tuned
for both :class:`TanhSinh` and :class:`GaussLegendre`.
The output is roughly as follows:
+---------+---------+
| `p` | `m` |
+=========+=========+
| 50 | 6 |
+---------+---------+
| 100 | 7 |
+---------+---------+
| 500 | 10 |
+---------+---------+
| 3000 | 12 |
+---------+---------+
This formula is based purely on a limited amount of
experimentation and will sometimes be wrong.
"""
# Expected degree
# XXX: use mag
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
# Reasonable "worst case"
g += 2
return g
def estimate_error(self, results, prec, epsilon):
r"""
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
        with a quadrature rule of degree `1, 2, \ldots, k`, estimate
the error of `I_k`.
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
that each degree increment roughly doubles the accuracy of
the quadrature rule (this is true for both :class:`TanhSinh`
and :class:`GaussLegendre`). The extrapolation formula is given
by Borwein, Bailey & Girgensohn. Although not very conservative,
this method seems to be very robust in practice.
"""
if len(results) == 2:
return abs(results[0]-results[1])
try:
if results[-1] == results[-2] == results[-3]:
return self.ctx.zero
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
except ValueError:
return epsilon
D3 = -prec
D4 = min(0, max(D1**2/D2, 2*D1, D3))
return self.ctx.mpf(10) ** int(D4)
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
"""
Main integration function. Computes the 1D integral over
the interval specified by *points*. For each subinterval,
performs quadrature of degree from 1 up to *max_degree*
until :func:`~mpmath.estimate_error` signals convergence.
:func:`~mpmath.summation` transforms each subintegration to
the standard interval and then calls :func:`~mpmath.sum_next`.
"""
ctx = self.ctx
I = err = ctx.zero
for i in xrange(len(points)-1):
a, b = points[i], points[i+1]
if a == b:
continue
# XXX: we could use a single variable transformation,
# but this is not good in practice. We get better accuracy
# by having 0 as an endpoint.
if (a, b) == (ctx.ninf, ctx.inf):
_f = f
f = lambda x: _f(-x) + _f(x)
a, b = (ctx.zero, ctx.inf)
results = []
for degree in xrange(1, max_degree+1):
nodes = self.get_nodes(a, b, degree, prec, verbose)
if verbose:
print("Integrating from %s to %s (degree %s of %s)" % \
(ctx.nstr(a), ctx.nstr(b), degree, max_degree))
results.append(self.sum_next(f, nodes, degree, prec, results, verbose))
if degree > 1:
err = self.estimate_error(results, prec, epsilon)
if err <= epsilon:
break
if verbose:
print("Estimated error:", ctx.nstr(err))
I += results[-1]
if err > epsilon:
if verbose:
print("Failed to reach full accuracy. Estimated error:", ctx.nstr(err))
return I, err
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
r"""
Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
        contains the `(x_k, w_k)` pairs.
:func:`~mpmath.summation` will supply the list *results* of
values computed by :func:`~mpmath.sum_next` at previous degrees, in
case the quadrature rule is able to reuse them.
"""
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
class TanhSinh(QuadratureRule):
r"""
This class implements "tanh-sinh" or "doubly exponential"
quadrature. This quadrature rule is based on the Euler-Maclaurin
integral formula. By performing a change of variables involving
nested exponentials / hyperbolic functions (hence the name), the
derivatives at the endpoints vanish rapidly. Since the error term
in the Euler-Maclaurin formula depends on the derivatives at the
endpoints, a simple step sum becomes extremely accurate. In
practice, this means that doubling the number of evaluation
points roughly doubles the number of accurate digits.
Comparison to Gauss-Legendre:
* Initial computation of nodes is usually faster
* Handles endpoint singularities better
* Handles infinite integration intervals better
* Is slower for smooth integrands once nodes have been computed
The implementation of the tanh-sinh algorithm is based on the
description given in Borwein, Bailey & Girgensohn, "Experimentation
in Mathematics - Computational Paths to Discovery", A K Peters,
2003, pages 312-313. In the present implementation, a few
improvements have been made:
* A more efficient scheme is used to compute nodes (exploiting
recurrence for the exponential function)
* The nodes are computed successively instead of all at once
Various documents describing the algorithm are available online, e.g.:
* http://crd.lbl.gov/~dhbailey/dhbpapers/dhb-tanh-sinh.pdf
* http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
"""
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
"""
Step sum for tanh-sinh quadrature of degree `m`. We exploit the
fact that half of the abscissas at degree `m` are precisely the
abscissas from degree `m-1`. Thus reusing the result from
the previous level allows a 2x speedup.
"""
h = self.ctx.mpf(2)**(-degree)
# Abscissas overlap, so reusing saves half of the time
if previous:
S = previous[-1]/(h*2)
else:
S = self.ctx.zero
S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
return h*S
def calc_nodes(self, degree, prec, verbose=False):
r"""
The abscissas and weights for tanh-sinh quadrature of degree
`m` are given by
.. math::
x_k = \tanh(\pi/2 \sinh(t_k))
w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
list of nodes is actually infinite, but the weights die off so
rapidly that only a few are needed.
"""
ctx = self.ctx
nodes = []
extra = 20
ctx.prec += extra
tol = ctx.ldexp(1, -prec-10)
pi4 = ctx.pi/4
# For simplicity, we work in steps h = 1/2^n, with the first point
# offset so that we can reuse the sum from the previous degree
# We define degree 1 to include the "degree 0" steps, including
# the point x = 0. (It doesn't work well otherwise; not sure why.)
t0 = ctx.ldexp(1, -degree)
if degree == 1:
#nodes.append((mpf(0), pi4))
#nodes.append((-mpf(0), pi4))
nodes.append((ctx.zero, ctx.pi/2))
h = t0
else:
h = t0*2
# Since h is fixed, we can compute the next exponential
# by simply multiplying by exp(h)
expt0 = ctx.exp(t0)
a = pi4 * expt0
b = pi4 / expt0
udelta = ctx.exp(h)
urdelta = 1/udelta
for k in xrange(0, 20*2**degree+1):
# Reference implementation:
# t = t0 + k*h
# x = tanh(pi/2 * sinh(t))
# w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
# Fast implementation. Note that c = exp(pi/2 * sinh(t))
c = ctx.exp(a-b)
d = 1/c
co = (c+d)/2
si = (c-d)/2
x = si / co
w = (a+b) / co**2
diff = abs(x-1)
if diff <= tol:
break
nodes.append((x, w))
nodes.append((-x, w))
a *= udelta
b *= urdelta
if verbose and k % 300 == 150:
# Note: the number displayed is rather arbitrary. Should
# figure out how to print something that looks more like a
# percentage
print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
ctx.prec -= extra
return nodes
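# Hedged reference (illustration only, not used above): the direct, unoptimized
# evaluation of the tanh-sinh abscissa/weight formulas quoted in
# TanhSinh.calc_nodes, useful for spot-checking the recurrence-based fast path.
# `ctx` is assumed to be an mpmath context object such as mpmath.mp.
def _tanh_sinh_node_reference(ctx, t):
    x = ctx.tanh(ctx.pi/2 * ctx.sinh(t))
    w = ctx.pi/2 * ctx.cosh(t) / ctx.cosh(ctx.pi/2 * ctx.sinh(t))**2
    return x, w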
class GaussLegendre(QuadratureRule):
"""
This class implements Gauss-Legendre quadrature, which is
exceptionally efficient for polynomials and polynomial-like (i.e.
very smooth) integrands.
The abscissas and weights are given by roots and values of
Legendre polynomials, which are the orthogonal polynomials
on `[-1, 1]` with respect to the unit weight
(see :func:`~mpmath.legendre`).
In this implementation, we take the "degree" `m` of the quadrature
to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
Borwein, Bailey & Girgensohn). This way we get quadratic, rather
than linear, convergence as the degree is incremented.
Comparison to tanh-sinh quadrature:
* Is faster for smooth integrands once nodes have been computed
* Initial computation of nodes is usually slower
* Handles endpoint singularities worse
* Handles infinite integration intervals worse
"""
def calc_nodes(self, degree, prec, verbose=False):
"""
Calculates the abscissas and weights for Gauss-Legendre
        quadrature of the given degree (actually `3 \cdot 2^m`).
"""
ctx = self.ctx
# It is important that the epsilon is set lower than the
# "real" epsilon
epsilon = ctx.ldexp(1, -prec-8)
# Fairly high precision might be required for accurate
# evaluation of the roots
orig = ctx.prec
ctx.prec = int(prec*1.5)
if degree == 1:
x = ctx.sqrt(ctx.mpf(3)/5)
w = ctx.mpf(5)/9
nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
ctx.prec = orig
return nodes
nodes = []
n = 3*2**(degree-1)
upto = n//2 + 1
for j in xrange(1, upto):
# Asymptotic formula for the roots
r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
# Newton iteration
while 1:
t1, t2 = 1, 0
# Evaluates the Legendre polynomial using its defining
# recurrence relation
for j1 in xrange(1,n+1):
t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
t4 = n*(r*t1- t2)/(r**2-1)
t5 = r
a = t1/t4
r = r - a
if abs(a) < epsilon:
break
x = r
w = 2/((1-r**2)*t4**2)
if verbose and j % 30 == 15:
print("Computing nodes (%i of %i)" % (j, upto))
nodes.append((x, w))
nodes.append((-x, w))
ctx.prec = orig
return nodes
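# Hedged helper (illustration only, not used above): plain-float evaluation of
# the Legendre polynomial P_n(r) and its derivative via the same three-term
# recurrence and derivative identity that the Newton iteration in
# GaussLegendre.calc_nodes relies on.
def _legendre_and_derivative(n, r):
    t1, t2 = 1.0, 0.0
    for j in xrange(1, n + 1):
        t1, t2 = ((2*j - 1)*r*t1 - (j - 1)*t2)/j, t1
    deriv = n*(r*t1 - t2)/(r**2 - 1)
    return t1, deriv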
class QuadratureMethods(object):
def __init__(ctx, *args, **kwargs):
ctx._gauss_legendre = GaussLegendre(ctx)
ctx._tanh_sinh = TanhSinh(ctx)
def quad(ctx, f, *points, **kwargs):
r"""
Computes a single, double or triple integral over a given
1D interval, 2D rectangle, or 3D cuboid. A basic example::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(sin, [0, pi])
2.0
A basic 2D integral::
>>> f = lambda x, y: cos(x+y/2)
>>> quad(f, [-pi/2, pi/2], [0, pi])
4.0
**Interval format**
The integration range for each dimension may be specified
using a list or tuple. Arguments are interpreted as follows:
``quad(f, [x1, x2])`` -- calculates
`\int_{x_1}^{x_2} f(x) \, dx`
``quad(f, [x1, x2], [y1, y2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
\, dz \, dy \, dx`
Endpoints may be finite or infinite. An interval descriptor
may also contain more than two points. In this
case, the integration is split into subintervals, between
each pair of consecutive points. This is useful for
dealing with mid-interval discontinuities, or integrating
over large intervals where the function is irregular or
oscillates.
**Options**
:func:`~mpmath.quad` recognizes the following keyword arguments:
*method*
Chooses integration algorithm (described below).
*error*
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
integral and `e` is the estimated error.
*maxdegree*
Maximum degree of the quadrature rule to try before
quitting.
*verbose*
Print details about progress.
**Algorithms**
Mpmath presently implements two integration algorithms: tanh-sinh
quadrature and Gauss-Legendre quadrature. These can be selected
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
as shortcuts.
Both algorithms have the property that doubling the number of
evaluation points roughly doubles the accuracy, so both are ideal
for high precision quadrature (hundreds or thousands of digits).
At high precision, computing the nodes and weights for the
integration can be expensive (more expensive than computing the
function values). To make repeated integrations fast, nodes
are automatically cached.
The advantages of the tanh-sinh algorithm are that it tends to
handle endpoint singularities well, and that the nodes are cheap
to compute on the first run. For these reasons, it is used by
:func:`~mpmath.quad` as the default algorithm.
Gauss-Legendre quadrature often requires fewer function
evaluations, and is therefore often faster for repeated use, but
the algorithm does not handle endpoint singularities as well and
the nodes are more expensive to compute. Gauss-Legendre quadrature
can be a better choice if the integrand is smooth and repeated
integrations are required (e.g. for multiple integrals).
See the documentation for :class:`TanhSinh` and
:class:`GaussLegendre` for additional details.
**Examples of 1D integrals**
Intervals may be infinite or half-infinite. The following two
examples evaluate the limits of the inverse tangent function
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
        `\int_{-\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
>>> mp.dps = 15
>>> quad(lambda x: 2/(x**2+1), [0, inf])
3.14159265358979
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
3.14159265358979
Integrals can typically be resolved to high precision.
The following computes 50 digits of `\pi` by integrating the
area of the half-circle defined by `x^2 + y^2 \le 1`,
`-1 \le x \le 1`, `y \ge 0`::
>>> mp.dps = 50
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
3.1415926535897932384626433832795028841971693993751
One can just as well compute 1000 digits (output truncated)::
>>> mp.dps = 1000
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
3.141592653589793238462643383279502884...216420198
Complex integrals are supported. The following computes
a residue at `z = 0` by integrating counterclockwise along the
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
>>> mp.dps = 15
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
(0.0 + 6.28318530717959j)
**Examples of 2D and 3D integrals**
Here are several nice examples of analytically solvable
2D integrals (taken from MathWorld [1]) that can be evaluated
to high precision fairly rapidly by :func:`~mpmath.quad`::
>>> mp.dps = 30
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
>>> quad(f, [0, 1], [0, 1])
0.577215664901532860606512090082
>>> +euler
0.577215664901532860606512090082
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
>>> quad(f, [-1, 1], [-1, 1])
3.17343648530607134219175646705
>>> 4*log(2+sqrt(3))-2*pi/3
3.17343648530607134219175646705
>>> f = lambda x, y: 1/(1-x**2 * y**2)
>>> quad(f, [0, 1], [0, 1])
1.23370055013616982735431137498
>>> pi**2 / 8
1.23370055013616982735431137498
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
1.64493406684822643647241516665
>>> pi**2 / 6
1.64493406684822643647241516665
Multiple integrals may be done over infinite ranges::
>>> mp.dps = 15
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
0.367879441171442
>>> print(1/e)
0.367879441171442
For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
        For example, we can replicate the earlier example of calculating
        `\pi` by integrating over the unit circle, this time using double
        quadrature to measure the circle's area directly::
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
>>> quad(f, [-1, 1])
3.14159265358979
Here is a simple triple integral::
>>> mp.dps = 15
>>> f = lambda x,y,z: x*y/(1+z)
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
0.101366277027041
>>> (log(3)-log(2))/4
0.101366277027041
**Singularities**
Both tanh-sinh and Gauss-Legendre quadrature are designed to
integrate smooth (infinitely differentiable) functions. Neither
algorithm copes well with mid-interval singularities (such as
mid-interval discontinuities in `f(x)` or `f'(x)`).
The best solution is to split the integral into parts::
>>> mp.dps = 15
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
3.99900894176779
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
4.0
The tanh-sinh rule often works well for integrands having a
singularity at one or both endpoints::
>>> mp.dps = 15
>>> quad(log, [0, 1], method='tanh-sinh') # Good
-1.0
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
-0.999932197413801
However, the result may still be inaccurate for some functions::
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
1.99999999946942
This problem is not due to the quadrature rule per se, but to
numerical amplification of errors in the nodes. The problem can be
circumvented by temporarily increasing the precision::
>>> mp.dps = 30
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
>>> mp.dps = 15
>>> +a
2.0
**Highly variable functions**
For functions that are smooth (in the sense of being infinitely
differentiable) but contain sharp mid-interval peaks or many
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
example, with default settings, :func:`~mpmath.quad` is able to integrate
`\sin(x)` accurately over an interval of length 100 but not over
length 1000::
>>> quad(sin, [0, 100]); 1-cos(100) # Good
0.137681127712316
0.137681127712316
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
-37.8587612408485
0.437620923709297
One solution is to break the integration into 10 intervals of
length 100::
>>> quad(sin, linspace(0, 1000, 10)) # Good
0.437620923709297
Another is to increase the degree of the quadrature::
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
0.437620923709297
Whether splitting the interval or increasing the degree is
more efficient differs from case to case. Another example is the
function `1/(1+x^2)`, which has a sharp peak centered around
`x = 0`::
>>> f = lambda x: 1/(1+x**2)
>>> quad(f, [-100, 100]) # Bad
3.64804647105268
>>> quad(f, [-100, 100], maxdegree=10) # Good
3.12159332021646
>>> quad(f, [-100, 0, 100]) # Also good
3.12159332021646
**References**
1. http://mathworld.wolfram.com/DoubleIntegral.html
"""
rule = kwargs.get('method', 'tanh-sinh')
if type(rule) is str:
if rule == 'tanh-sinh':
rule = ctx._tanh_sinh
elif rule == 'gauss-legendre':
rule = ctx._gauss_legendre
else:
raise ValueError("unknown quadrature rule: %s" % rule)
else:
rule = rule(ctx)
verbose = kwargs.get('verbose')
dim = len(points)
orig = prec = ctx.prec
epsilon = ctx.eps/8
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
points = [ctx._as_points(p) for p in points]
try:
ctx.prec += 20
if dim == 1:
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
elif dim == 2:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: f(x,y), \
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
elif dim == 3:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: \
rule.summation(lambda z: f(x,y,z), \
points[2], prec, epsilon, m)[0],
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
else:
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
finally:
ctx.prec = orig
if kwargs.get("error"):
return +v, err
return +v
def quadts(ctx, *args, **kwargs):
"""
Performs tanh-sinh quadrature. The call
quadts(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=TanhSinh)
For example, a single integral and a double integral:
quadts(lambda x: exp(cos(x)), [0, 1])
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'tanh-sinh'
return ctx.quad(*args, **kwargs)
def quadgl(ctx, *args, **kwargs):
"""
Performs Gauss-Legendre quadrature. The call
quadgl(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=GaussLegendre)
For example, a single integral and a double integral:
quadgl(lambda x: exp(cos(x)), [0, 1])
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
        See documentation for GaussLegendre for algorithmic information about
        Gauss-Legendre quadrature.
"""
kwargs['method'] = 'gauss-legendre'
return ctx.quad(*args, **kwargs)
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
r"""
Calculates
.. math ::
I = \int_a^b f(x) dx
where at least one of `a` and `b` is infinite and where
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
can also handle oscillatory integrals where the oscillation
rate is different from a pure sine or cosine wave.
In the standard case when `|a| < \infty, b = \infty`,
:func:`~mpmath.quadosc` works by evaluating the infinite series
.. math ::
I = \int_a^{x_1} f(x) dx +
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
where `x_k` are consecutive zeros (alternatively
some other periodic reference point) of `f(x)`.
Accordingly, :func:`~mpmath.quadosc` requires information about the
zeros of `f(x)`. For a periodic function, you can specify
the zeros by either providing the angular frequency `\omega`
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
specify the `n`-th zero by providing the *zeros* arguments.
Below is an example of each::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x: sin(3*x)/(x**2+1)
>>> quadosc(f, [0,inf], omega=3)
0.37833007080198
>>> quadosc(f, [0,inf], period=2*pi/3)
0.37833007080198
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
0.37833007080198
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
0.37833007080198
Note that *zeros* was specified to multiply `n` by the
*half-period*, not the full period. In theory, it does not matter
whether each partial integral is done over a half period or a full
period. However, if done over half-periods, the infinite series
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
typically makes the extrapolation much more efficient.
Here is an example of an integration over the entire real line,
and a half-infinite integration starting at `-\infty`::
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
1.15572734979092
>>> pi/e
1.15572734979092
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
-0.0844109505595739
>>> cos(1)+si(1)-pi/2
-0.0844109505595738
Of course, the integrand may contain a complex exponential just as
well as a real sine or cosine::
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
(0.156410688228254 + 0.0j)
>>> pi/e**3
0.156410688228254
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
(0.00317486988463794 - 0.0447701735209082j)
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
(0.00317486988463794 - 0.0447701735209082j)
**Non-periodic functions**
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
strictly periodic, *omega* or *period* might not work, and it might
be necessary to use *zeros*.
A notable exception can be made for Bessel functions which, though not
periodic, are "asymptotically periodic" in a sufficiently strong sense
that the sum extrapolation will work out::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
More properly, one should provide the exact Bessel function zeros::
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
>>> quadosc(j0, [0, inf], zeros=j0zero)
1.0
For an example where *zeros* becomes necessary, consider the
complete Fresnel integrals
.. math ::
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
= \sqrt{\frac{\pi}{8}}.
Although the integrands do not decrease in magnitude as
`x \to \infty`, the integrals are convergent since the oscillation
rate increases (causing consecutive periods to asymptotically
cancel out). These integrals are virtually impossible to calculate
to any kind of accuracy using standard quadrature rules. However,
if one provides the correct asymptotic distribution of zeros
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
>>> mp.dps = 30
>>> f = lambda x: cos(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> f = lambda x: sin(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> sqrt(pi/8)
0.626657068657750125603941321203
(Interestingly, these integrals can still be evaluated if one
places some other constant than `\pi` in the square root sign.)
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
the inverse-function distribution `h^{-1}(x)`::
>>> mp.dps = 15
>>> f = lambda x: sin(exp(x))
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
-0.25024394235267
>>> pi/2-si(e)
-0.250243942352671
**Non-alternating functions**
If the integrand oscillates around a positive value, without
alternating signs, the extrapolation might fail. A simple trick
that sometimes works is to multiply or divide the frequency by 2::
>>> f = lambda x: 1/x**2+sin(x)/x**4
>>> quadosc(f, [1,inf], omega=1) # Bad
1.28642190869861
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
1.28652953559617
>>> 1+(cos(1)+ci(1)+sin(1))/6
1.28652953559617
**Fast decay**
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
integrands. If the integrand decreases exponentially or faster,
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
much faster than :func:`~mpmath.quadosc`)::
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
0.5
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
0.5
"""
a, b = ctx._as_points(interval)
a = ctx.convert(a)
b = ctx.convert(b)
if [omega, period, zeros].count(None) != 2:
raise ValueError( \
"must specify exactly one of omega, period, zeros")
if a == ctx.ninf and b == ctx.inf:
s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
return s1 + s2
if a == ctx.ninf:
if zeros:
return ctx.quadosc(lambda x: f(-x), [-b, -a], zeros=lambda n: zeros(-n))
else:
return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
if b != ctx.inf:
raise ValueError("quadosc requires an infinite integration interval")
if not zeros:
if omega:
period = 2*ctx.pi/omega
zeros = lambda n: n*period/2
#for n in range(1,10):
# p = zeros(n)
# if p > a:
# break
#if n >= 9:
# raise ValueError("zeros do not appear to be correctly indexed")
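# Integrate from a up to the first zero with Gauss-Legendre quadrature,
# then let nsum extrapolate the series of integrals over consecutive
# zero-to-zero intervals out to infinity.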
n = 1
s = ctx.quadgl(f, [a, zeros(n)])
def term(k):
return ctx.quadgl(f, [zeros(k), zeros(k+1)])
s += ctx.nsum(term, [n, ctx.inf])
return s
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import constants, utils
from cloudify.decorators import workflow
from cloudify.plugins import lifecycle
@workflow
def install(ctx, **kwargs):
"""Default install workflow"""
lifecycle.install_node_instances(
graph=ctx.graph_mode(),
node_instances=set(ctx.node_instances))
@workflow
def uninstall(ctx, **kwargs):
"""Default uninstall workflow"""
lifecycle.uninstall_node_instances(
graph=ctx.graph_mode(),
node_instances=set(ctx.node_instances))
@workflow
def auto_heal_reinstall_node_subgraph(
ctx,
node_instance_id,
diagnose_value='Not provided',
**kwargs):
"""Reinstalls the whole subgraph of the system topology
The subgraph consists of all the nodes that are hosted on the
failing node's compute host, together with the compute node itself.
Additionally, it unlinks and re-establishes the appropriate relationships.
:param ctx: cloudify context
:param node_instance_id: failing node instance's id
:param diagnose_value: diagnosed reason of failure
"""
ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}"
.format(node_instance_id, diagnose_value))
failing_node = ctx.get_node_instance(node_instance_id)
failing_node_host = ctx.get_node_instance(
failing_node._node_instance.host_id
)
subgraph_node_instances = failing_node_host.get_contained_subgraph()
intact_nodes = set(ctx.node_instances) - subgraph_node_instances
graph = ctx.graph_mode()
lifecycle.reinstall_node_instances(
graph=graph,
node_instances=subgraph_node_instances,
intact_nodes=intact_nodes)
@workflow
def scale(ctx, node_id, delta, scale_compute, **kwargs):
"""Scales in/out the subgraph of node_id.
If `scale_compute` is set to false, the subgraph will consist of all
the nodes that are contained in `node_id` and `node_id` itself.
If `scale_compute` is set to true, the subgraph will consist of all
nodes that are contained in the compute node that contains `node_id`
and the compute node itself.
If `node_id` is neither contained in a compute node nor a compute node
itself, the `scale_compute` parameter is ignored.
`delta` is used to specify the scale factor.
For `delta > 0`: If current number of instances is `N`, scale out to
`N + delta`.
For `delta < 0`: If current number of instances is `N`, scale in to
`N - |delta|`.
:param ctx: cloudify context
:param node_id: the node_id to scale
:param delta: scale in/out factor
:param scale_compute: should scale apply on compute node containing
'node_id'
"""
graph = ctx.graph_mode()
node = ctx.get_node(node_id)
if not node:
raise ValueError("Node {0} doesn't exist".format(node_id))
if delta == 0:
ctx.logger.info('delta parameter is 0, so no scaling will take place.')
return
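# For example (illustrative numbers): with 3 current instances, delta=2
# scales out to 5 instances, delta=-2 scales in to 1 instance, and delta=-4
# is rejected below because the planned number of instances would be negative.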
host_node = node.host_node
scaled_node = host_node if (scale_compute and host_node) else node
curr_num_instances = scaled_node.number_of_instances
planned_num_instances = curr_num_instances + delta
if planned_num_instances < 0:
raise ValueError('Provided delta: {0} is illegal. Current number of '
'instances of node {1} is {2}'
.format(delta, node_id, curr_num_instances))
modification = ctx.deployment.start_modification({
scaled_node.id: {
'instances': planned_num_instances
# These following parameters are not exposed at the moment,
# but should be used to control which node instances get scaled in
# (when scaling in).
# They are mentioned here, because currently, the modification API
# is not very documented.
# Special care should be taken because if `scale_compute == True`
# (which is the default), then these ids should be the compute node
# instance ids which are not necessarily instances of the node
# specified by `node_id`.
# Node instances denoted by these instance ids should be *kept* if
# possible.
# 'removed_ids_exclude_hint': [],
# Node instances denoted by these instance ids should be *removed*
# if possible.
# 'removed_ids_include_hint': []
}
})
try:
ctx.logger.info('Deployment modification started. '
'[modification_id={0}]'.format(modification.id))
if delta > 0:
added_and_related = set(modification.added.node_instances)
added = set(i for i in added_and_related
if i.modification == 'added')
related = added_and_related - added
try:
lifecycle.install_node_instances(
graph=graph,
node_instances=added,
intact_nodes=related)
except:
ctx.logger.error('Scale out failed, scaling back in.')
for task in graph.tasks_iter():
graph.remove_task(task)
lifecycle.uninstall_node_instances(
graph=graph,
node_instances=added,
intact_nodes=related)
raise
else:
removed_and_related = set(modification.removed.node_instances)
removed = set(i for i in removed_and_related
if i.modification == 'removed')
related = removed_and_related - removed
lifecycle.uninstall_node_instances(
graph=graph,
node_instances=removed,
intact_nodes=related)
except:
ctx.logger.warn('Rolling back deployment modification. '
'[modification_id={0}]'.format(modification.id))
try:
modification.rollback()
except:
ctx.logger.warn('Deployment modification rollback failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
raise
else:
try:
modification.finish()
except:
ctx.logger.warn('Deployment modification finish failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
def _filter_node_instances(ctx, node_ids, node_instance_ids, type_names):
filtered_node_instances = []
for node in ctx.nodes:
if node_ids and node.id not in node_ids:
continue
if type_names and not next((type_name for type_name in type_names if
type_name in node.type_hierarchy), None):
continue
for instance in node.instances:
if node_instance_ids and instance.id not in node_instance_ids:
continue
filtered_node_instances.append(instance)
return filtered_node_instances
def _get_all_host_instances(ctx):
node_instances = set()
for node_instance in ctx.node_instances:
if lifecycle.is_host_node(node_instance):
node_instances.add(node_instance)
return node_instances
@workflow
def install_new_agents(ctx, install_agent_timeout, node_ids,
node_instance_ids, **_):
if node_ids or node_instance_ids:
filtered_node_instances = _filter_node_instances(
ctx=ctx,
node_ids=node_ids,
node_instance_ids=node_instance_ids,
type_names=[])
error = False
for node_instance in filtered_node_instances:
if not lifecycle.is_host_node(node_instance):
msg = 'Node instance {0} is not a host.'.format(node_instance.id)
ctx.logger.error(msg)
error = True
elif utils.internal.get_install_method(
node_instance.node.properties) \
== constants.AGENT_INSTALL_METHOD_NONE:
msg = ('Agent should not be installed on '
'node instance {0}').format(node_instance.id)
ctx.logger.error(msg)
error = True
if error:
raise ValueError('Specified filters are not correct.')
else:
hosts = filtered_node_instances
else:
hosts = (host for host in _get_all_host_instances(ctx)
if utils.internal.get_install_method(host.node.properties)
!= constants.AGENT_INSTALL_METHOD_NONE)
graph = ctx.graph_mode()
for host in hosts:
seq = graph.sequence()
seq.add(
host.send_event('Installing new agent.'),
host.execute_operation(
'cloudify.interfaces.cloudify_agent.create_amqp',
kwargs={'install_agent_timeout': install_agent_timeout},
allow_kwargs_override=True),
host.send_event('New agent installed.'),
*lifecycle.prepare_running_agent(host)
)
for subnode in host.get_contained_subgraph():
seq.add(subnode.execute_operation(
'cloudify.interfaces.monitoring.start'))
graph.execute()
@workflow
def execute_operation(ctx, operation, operation_kwargs, allow_kwargs_override,
run_by_dependency_order, type_names, node_ids,
node_instance_ids, **kwargs):
""" A generic workflow for executing arbitrary operations on nodes """
graph = ctx.graph_mode()
subgraphs = {}
# filtering node instances
filtered_node_instances = _filter_node_instances(
ctx=ctx,
node_ids=node_ids,
node_instance_ids=node_instance_ids,
type_names=type_names)
if run_by_dependency_order:
# if run by dependency order is set, then create stub subgraphs for the
# rest of the instances. This is done to support indirect
# dependencies, i.e. when instance A is dependent on instance B
# which is dependent on instance C, where A and C are to be executed
# with the operation on (i.e. they're in filtered_node_instances)
# yet B isn't.
# We add stub subgraphs rather than creating dependencies between A
# and C themselves since even though it may sometimes increase the
# number of dependency relationships in the execution graph, it also
# ensures their number is linear to the number of relationships in
# the deployment (e.g. consider if A and C are one out of N instances
# of their respective nodes yet there's a single instance of B -
# using subgraphs we'll have 2N relationships instead of N^2).
filtered_node_instances_ids = set(inst.id for inst in
filtered_node_instances)
for instance in ctx.node_instances:
if instance.id not in filtered_node_instances_ids:
subgraphs[instance.id] = graph.subgraph(instance.id)
# preparing the parameters to the execute_operation call
exec_op_params = {
'kwargs': operation_kwargs,
'operation': operation
}
if allow_kwargs_override is not None:
exec_op_params['allow_kwargs_override'] = allow_kwargs_override
# registering actual tasks to sequences
for instance in filtered_node_instances:
start_event_message = 'Starting operation {0}'.format(operation)
if operation_kwargs:
start_event_message += ' (Operation parameters: {0})'.format(
operation_kwargs)
subgraph = graph.subgraph(instance.id)
sequence = subgraph.sequence()
sequence.add(
instance.send_event(start_event_message),
instance.execute_operation(**exec_op_params),
instance.send_event('Finished operation {0}'.format(operation)))
subgraphs[instance.id] = subgraph
# adding tasks dependencies if required
if run_by_dependency_order:
for instance in ctx.node_instances:
for rel in instance.relationships:
graph.add_dependency(subgraphs[instance.id],
subgraphs[rel.target_id])
graph.execute()
|
|
# -*- coding: utf-8 -*-
"""
.. _layer_tutorial:
.. currentmodule:: mlens.parallel
Layer Mechanics
===============
ML-Ensemble is designed to provide an easy user interface. But it is also designed
to be extremely flexible, all the while providing maximum concurrency at minimal
memory consumption. The lower-level API that builds the ensemble and manages the
computations is constructed in as modular a fashion as possible.
The low-level API introduces a computational graph-like environment that you can
directly exploit to gain further control over your ensemble. In fact, building
your ensemble through the low-level API is almost as straight forward as using
the high-level API. In this tutorial, we will walk through how to use the
:class:`Group` and :class:`Layer` classes to fit several learners.
Suppose we want to fit several learners. The :ref:`learner tutorial <learner_tutorial>`
showed us how to fit a single learner, so one approach would be to simply
iterate over our learners and fit them one at a time. This, however, is a very slow
approach since we don't exploit the fact that learners can be trained in parallel.
Moreover, any type of aggregation, like putting all predictions into an array, would
have to be done manually.
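For intuition, such a sequential approach might look like the sketch below
(illustrative only; it assumes the dummy ``OLS`` estimator used throughout
this tutorial exposes the usual ``fit``/``predict`` API)::

    predictions = []
    for estimator in [OLS(1), OLS(2)]:
        estimator.fit(X, y)
        predictions.append(estimator.predict(X))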
The Layer API
^^^^^^^^^^^^^
To parallelize the implementation, we can use the :class:`Layer` class. A layer is
a handle that will run any number of :class:`Group` instances attached to it in parallel. Each
group in turn is a wrapper around an ``indexer-transformers-estimators`` triplet.
Basics
------
So, to fit our two learners in parallel, we first need a :class:`Group` object to
handle them.
"""
from mlens.parallel import Layer, Group, make_group, run
from mlens.utils.dummy import OLS, Scale
from mlens.index import FoldIndex
indexer = FoldIndex(folds=2)
group = make_group(indexer, [OLS(1), OLS(2)], None)
############################################################################
# This ``group`` object is now a complete description of how to fit our two
# learners using the prescribed indexing method.
#
# To train the estimators, we need to feed the group to a :class:`Layer` instance:
import numpy as np
np.random.seed(2)
X = np.arange(20).reshape(10, 2)
y = np.random.rand(10)
layer = Layer(stack=group)
print(
run(layer, 'fit', X, y, return_preds=True)
)
############################################################################
# To use some preprocessing before fitting the estimators, we can use the
# ``transformers`` argument when creating our ``group``:
group = make_group(indexer, [OLS(1), OLS(2)], [Scale()])
layer = Layer(stack=group)
print(
run(layer, 'fit', X, y, return_preds=True)
)
############################################################################
#
# Multitasking
# ------------
#
# If we want our estimators to have different preprocessing, we can easily
# achieve this either by specifying different cases when making the group,
# or by making two separate groups. In the first case:
group = make_group(
indexer,
{'case-1': [OLS(1)], 'case-2': [OLS(2)]},
{'case-1': [Scale()], 'case-2': []}
)
layer = Layer(stack=group)
print(
run(layer, 'fit', X, y, return_preds=True)
)
############################################################################
# In the latter case:
groups = [
make_group(indexer, OLS(1), Scale()), make_group(indexer, OLS(2), None)
]
layer = Layer(stack=groups)
print(
run(layer, 'fit', X, y, return_preds=True)
)
############################################################################
# Which method to prefer depends on the application, but generally it is
# preferable to put all transformers and estimators belonging to a given
# indexing strategy into one ``group`` instance: separate groups by indexer,
# and use cases to distinguish between different preprocessing pipelines.
############################################################################
# Now, suppose we want to do something more exotic, like using different
# indexing strategies for different estimators. This can easily be achieved
# by creating groups for each indexing strategy we want:
groups = [
make_group(FoldIndex(2), OLS(1), Scale()),
make_group(FoldIndex(4), OLS(2), None)
]
layer = Layer(stack=groups)
print(
run(layer, 'fit', X, y, return_preds=True)
)
############################################################################
# Some care needs to be taken here: if indexing strategies do not return the
# same number of rows, the output array will be zero-padded.
from mlens.index import BlendIndex
groups = [
make_group(FoldIndex(2), OLS(1), None),
make_group(BlendIndex(0.5), OLS(1), None)
]
layer = Layer(stack=groups)
print(
run(layer, 'fit', X, y, return_preds=True)
)
############################################################################
# Note that even if ``mlens`` indexers output different shapes, they preserve
# row indexing to ensure predictions are consistently mapped to their respective
# input. If you build a custom indexer, make sure that it uses a strictly
# sequential (with respect to row indexing) partitioning strategy.
############################################################################
#
# Layer features
# --------------
#
# A layer does not have to be specified all in one go; you can instantiate
# a layer and ``push`` and ``pop`` to its ``stack``.
layer = Layer()
group = make_group(FoldIndex(4), OLS(), None)
layer.push(group)
############################################################################
#
# .. note::
#
# If you push or pop to the stack, you must call ``fit`` before you can
# use the layer for prediction.
run(layer, 'fit', X, y)
group = make_group(FoldIndex(2), OLS(1), None)
layer.push(group)
try:
run(layer, 'predict', X, y)
except Exception as exc:
print("Error: %s" % str(exc))
############################################################################
# The :class:`Layer` class can print the progress of a job, as well as inspect
# data collected during the job. Note that the layer's printouts do not
# take group membership into account.
from mlens.metrics import rmse
layer = Layer()
group1 = make_group(
indexer,
{'case-1': [OLS(1)], 'case-2': [OLS(2)]},
{'case-1': [Scale()], 'case-2': []},
learner_kwargs={'scorer': rmse}
)
layer.push(group1)
run(layer, 'fit', X, y, return_preds=True)
print()
print("Collected data:")
print(layer.data)
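############################################################################
# Finally, a minimal sketch of prediction: with the layer above fitted, the
# same ``run`` helper can be called in 'predict' mode (here on the training
# data, purely for illustration).
print(run(layer, 'predict', X, y))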
|
|
# Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as oslo_db_exception
from oslo_log import log
from oslo_utils import excutils
from sqlalchemy import event
from neutron.db import api as db_api
from neutron.db.quota import api as quota_api
from neutron.i18n import _LE
LOG = log.getLogger(__name__)
def _count_resource(context, plugin, collection_name, tenant_id):
count_getter_name = "get_%s_count" % collection_name
# Some plugins support a count method for particular resources,
# using a DB's optimized counting features. We try to use that one
# if present. Otherwise just use the regular getter to retrieve all objects
# and count them in Python, allowing older plugins to still be supported.
try:
obj_count_getter = getattr(plugin, count_getter_name)
return obj_count_getter(context, filters={'tenant_id': [tenant_id]})
except (NotImplementedError, AttributeError):
obj_getter = getattr(plugin, "get_%s" % collection_name)
obj_list = obj_getter(context, filters={'tenant_id': [tenant_id]})
return len(obj_list) if obj_list else 0
class BaseResource(object):
"""Describe a single resource for quota checking."""
def __init__(self, name, flag, plural_name=None):
"""Initializes a resource.
:param name: The name of the resource, i.e., "instances".
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
:param plural_name: Plural form of the resource name. If not
specified, it is generated automatically by
appending an 's' to the resource name, unless
it ends with a 'y'. In that case the last
letter is removed, and 'ies' is appended.
Dashes are always converted to underscores.
"""
self.name = name
# If a plural name is not supplied, default to adding an 's' to
# the resource name, unless the resource name ends in 'y', in which
# case remove the 'y' and add 'ies'. Even if the code should not fiddle
# too much with English grammar, this is a rather common and easy to
# implement rule.
if plural_name:
self.plural_name = plural_name
elif self.name[-1] == 'y':
self.plural_name = "%sies" % self.name[:-1]
else:
self.plural_name = "%ss" % self.name
# always convert dashes to underscores
self.plural_name = self.plural_name.replace('-', '_')
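# e.g. 'network' -> 'networks', 'policy' -> 'policies',
# 'security-group-rule' -> 'security_group_rules'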
self.flag = flag
@property
def default(self):
"""Return the default value of the quota."""
# Any negative value will be interpreted as an infinite quota,
# and stored as -1 for compatibility with current behaviour
value = getattr(cfg.CONF.QUOTAS,
self.flag,
cfg.CONF.QUOTAS.default_quota)
return max(value, -1)
@property
def dirty(self):
"""Return the current state of the Resource instance.
:returns: True if the resource count is out of sync with the actual data,
False if it is in sync, and None if the resource instance
does not track usage.
"""
class CountableResource(BaseResource):
"""Describe a resource where the counts are determined by a function."""
def __init__(self, name, count, flag=None, plural_name=None):
"""Initializes a CountableResource.
Countable resources are those resources which directly
correspond to objects in the database, e.g., network, subnet,
etc. A CountableResource must be constructed with a counting
function, which will be called to determine the current counts
of the resource.
The counting function will be passed the context, along with
the extra positional and keyword arguments that are passed to
Quota.count(). It should return an integer specifying the
count.
:param name: The name of the resource, i.e., "instances".
:param count: A callable which returns the count of the
resource. The arguments passed are as described
above.
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
:param plural_name: Plural form of the resource name. If not
specified, it is generated automatically by
appending an 's' to the resource name, unless
it ends with a 'y'. In that case the last
letter is removed, and 'ies' is appended.
Dashes are always converted to underscores.
"""
super(CountableResource, self).__init__(
name, flag=flag, plural_name=plural_name)
self._count_func = count
def count(self, context, plugin, tenant_id, **kwargs):
return self._count_func(context, plugin, self.plural_name, tenant_id)
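# Illustrative usage (the resource and flag names below are hypothetical):
#
#     networks = CountableResource('network', _count_resource,
#                                  flag='quota_network')
#     used = networks.count(context, plugin, tenant_id)
#
# count() forwards the plural name ('networks') to _count_resource, which
# prefers the plugin's get_networks_count() and otherwise falls back to
# counting the result of get_networks().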
class TrackedResource(BaseResource):
"""Resource which keeps track of its usage data."""
def __init__(self, name, model_class, flag, plural_name=None):
"""Initializes an instance for a given resource.
TrackedResource are directly mapped to data model classes.
Resource usage is tracked in the database, and the model class to
which this resource refers is monitored to ensure always "fresh"
usage data are employed when performing quota checks.
This class operates under the assumption that the model class
describing the resource has a tenant identifier attribute.
:param name: The name of the resource, i.e., "networks".
:param model_class: The sqlalchemy model class of the resource for
which this instance is being created
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
:param plural_name: Plural form of the resource name. If not
specified, it is generated automatically by
appending an 's' to the resource name, unless
it ends with a 'y'. In that case the last
letter is removed, and 'ies' is appended.
Dashes are always converted to underscores.
"""
super(TrackedResource, self).__init__(
name, flag=flag, plural_name=plural_name)
# Register events for addition/removal of records in the model class
# As tenant_id is immutable for all Neutron objects there is no need
# to register a listener for update events
self._model_class = model_class
self._dirty_tenants = set()
self._out_of_sync_tenants = set()
@property
def dirty(self):
return self._dirty_tenants
def mark_dirty(self, context):
if not self._dirty_tenants:
return
with db_api.autonested_transaction(context.session):
# It is not necessary to protect this operation with a lock.
# Indeed, when this method is called the request has been processed
# and therefore all resources have already been created or deleted.
# dirty_tenants will contain all the tenants for which the
# resource count is changed. The list might contain also tenants
# for which resource count was altered in other requests, but this
# won't be harmful.
dirty_tenants_snap = self._dirty_tenants.copy()
for tenant_id in dirty_tenants_snap:
quota_api.set_quota_usage_dirty(context, self.name, tenant_id)
LOG.debug(("Persisted dirty status for tenant:%(tenant_id)s "
"on resource:%(resource)s"),
{'tenant_id': tenant_id, 'resource': self.name})
self._out_of_sync_tenants |= dirty_tenants_snap
self._dirty_tenants -= dirty_tenants_snap
def _db_event_handler(self, mapper, _conn, target):
try:
tenant_id = target['tenant_id']
except AttributeError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Model class %s does not have a tenant_id "
"attribute"), target)
self._dirty_tenants.add(tenant_id)
# Retry the operation if a duplicate entry exception is raised. This
# can happen if two or more workers are trying to create a resource of a
# given kind for the same tenant concurrently. Retrying the operation will
# ensure that an UPDATE statement is emitted rather than an INSERT one
@oslo_db_api.wrap_db_retry(
max_retries=db_api.MAX_RETRIES,
retry_on_deadlock=True,
exception_checker=lambda exc:
isinstance(exc, oslo_db_exception.DBDuplicateEntry))
def _set_quota_usage(self, context, tenant_id, in_use):
return quota_api.set_quota_usage(
context, self.name, tenant_id, in_use=in_use)
def _resync(self, context, tenant_id, in_use):
# Update quota usage
usage_info = self._set_quota_usage(context, tenant_id, in_use)
self._dirty_tenants.discard(tenant_id)
self._out_of_sync_tenants.discard(tenant_id)
LOG.debug(("Unset dirty status for tenant:%(tenant_id)s on "
"resource:%(resource)s"),
{'tenant_id': tenant_id, 'resource': self.name})
return usage_info
def resync(self, context, tenant_id):
if tenant_id not in self._out_of_sync_tenants:
return
LOG.debug(("Synchronizing usage tracker for tenant:%(tenant_id)s on "
"resource:%(resource)s"),
{'tenant_id': tenant_id, 'resource': self.name})
in_use = context.session.query(self._model_class).filter_by(
tenant_id=tenant_id).count()
# Update quota usage
return self._resync(context, tenant_id, in_use)
def count(self, context, _plugin, tenant_id, resync_usage=True):
"""Return the current usage count for the resource.
This method will fetch aggregate information for resource usage
data, unless usage data are marked as "dirty".
In the latter case resource usage will be calculated counting
rows for tenant_id in the resource's database model.
Active reserved amount are instead always calculated by summing
amounts for matching records in the 'reservations' database model.
The _plugin parameter is unused, but it is kept for compatibility
with the signature of the count method for CountableResource
instances.
"""
# Load current usage data, setting a row-level lock on the DB
usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
context, self.name, tenant_id, lock_for_update=True)
# Always fetch reservations, as they are not tracked by usage counters
reservations = quota_api.get_reservations_for_resources(
context, tenant_id, [self.name])
reserved = reservations.get(self.name, 0)
# If dirty or missing, calculate actual resource usage querying
# the database and set/create usage info data
# NOTE: this routine "trusts" usage counters at service startup. This
# assumption is generally valid, but if the database is tampered with,
# or if data migrations do not take care of usage counters, the
# assumption will not hold anymore
if (tenant_id in self._dirty_tenants or
not usage_info or usage_info.dirty):
LOG.debug(("Usage tracker for resource:%(resource)s and tenant:"
"%(tenant_id)s is out of sync, need to count used "
"quota"), {'resource': self.name,
'tenant_id': tenant_id})
in_use = context.session.query(self._model_class).filter_by(
tenant_id=tenant_id).count()
# Update quota usage, if requested (by default do not do that, as
# typically one counts before adding a record, and that would mark
# the usage counter as dirty again)
if resync_usage:
usage_info = self._resync(context, tenant_id, in_use)
else:
resource = usage_info.resource if usage_info else self.name
tenant_id = usage_info.tenant_id if usage_info else tenant_id
dirty = usage_info.dirty if usage_info else True
usage_info = quota_api.QuotaUsageInfo(
resource, tenant_id, in_use, dirty)
LOG.debug(("Quota usage for %(resource)s was recalculated. "
"Used quota:%(used)d."),
{'resource': self.name,
'used': usage_info.used})
return usage_info.used + reserved
def register_events(self):
event.listen(self._model_class, 'after_insert', self._db_event_handler)
event.listen(self._model_class, 'after_delete', self._db_event_handler)
def unregister_events(self):
event.remove(self._model_class, 'after_insert', self._db_event_handler)
event.remove(self._model_class, 'after_delete', self._db_event_handler)
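# Illustrative usage (the model class and flag below are hypothetical):
#
#     networks = TrackedResource('network', Network, 'quota_network')
#     networks.register_events()
#
# Once events are registered, every insert or delete on the model marks the
# owning tenant as dirty, and count() recounts from the database on the next
# quota check for that tenant.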
|
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/group -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_group
short_description: Modify, and idempotently manage openshift groups.
description:
- Modify openshift groups programmatically.
options:
state:
description:
- Supported states: present, absent, list
- present - will ensure object is created or updated to the value specified
- list - will return a group
- absent - will remove the group
required: False
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: str
aliases: []
author:
- "Joel Diaz <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create group
oc_group:
state: present
name: acme_org
register: group_out
'''
# -*- -*- -*- End included fragment: doc/group -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
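# e.g. with the default '.' separator, parse_key('a.b[0]') returns
# [('', 'a'), ('', 'b'), ('0', '')] -- two dict keys followed by a list index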
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# The value came in as a string and a boolean was requested: make sure it
# matches one of the recognized boolean spellings above before converting
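# e.g. parse_value('yes', vtype='bool') -> True and parse_value('31') -> 31,
# since the string is eventually passed through yaml.safe_load below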
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # noqa: E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in OpenShift 3.2
# By default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
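# Illustrative sketch (not part of the original code); the version strings
# below are made up to show how the two helpers above compose:
#   out = 'oc v3.6.173\nkubernetes v1.6.1+5115d708d7\nopenshift v3.6.173\n'
#   Utils.filter_versions(out)
#   # -> {'oc': 'v3.6.173', 'kubernetes': 'v1.6.1+5115d708d7', 'openshift': 'v3.6.173'}
#   Utils.add_custom_versions(Utils.filter_versions(out))
#   # -> {'oc_numeric': '3.6.173', 'oc_short': '3.6',
#   #     'kubernetes_numeric': '1.6.1', 'kubernetes_short': '1.6',
#   #     'openshift_numeric': '3.6.173', 'openshift_short': '3.6'}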
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
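# Illustrative example (not part of the original code): check_def_equal()
# skips 'metadata' and 'status' and compares the remaining keys recursively,
# so a freshly created resource still matches its user-supplied definition:
#   user = {'kind': 'Group', 'users': ['alice']}
#   live = {'kind': 'Group', 'users': ['alice'], 'metadata': {'uid': 'abc123'}}
#   Utils.check_def_equal(user, live)   # -> True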
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
return rval
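# Illustrative example (not part of the original code); the option names here
# are hypothetical:
#   opts = {'replicas': {'value': 3, 'include': True},
#           'service_account': {'value': 'builder', 'include': True},
#           'selector': {'value': None, 'include': True}}
#   OpenShiftCLIConfig('name', 'default', '/etc/origin/master/admin.kubeconfig', opts).stringify()
#   # -> ['--replicas=3', '--service-account=builder']
#   # entries whose value is falsy (and not an int) are dropped.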
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/group.py -*- -*- -*-
class GroupConfig(object):
''' Handle group options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig):
''' constructor for handling group options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.data = {}
self.create_dict()
def create_dict(self):
''' return a group as a dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Group'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['users'] = None
# pylint: disable=too-many-instance-attributes
class Group(Yedit):
''' Class to model an openshift group '''
kind = 'group'
def __init__(self, content):
'''Group constructor'''
super(Group, self).__init__(content=content)
# -*- -*- -*- End included fragment: lib/group.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_group.py -*- -*- -*-
class OCGroup(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'group'
def __init__(self,
config,
verbose=False):
''' Constructor for OCGroup '''
super(OCGroup, self).__init__(config.namespace, config.kubeconfig)
self.config = config
self.namespace = config.namespace
self._group = None
@property
def group(self):
''' property function for the group '''
if not self._group:
self.get()
return self._group
@group.setter
def group(self, data):
''' setter function for yedit var '''
self._group = data
def exists(self):
''' return whether a group exists '''
if self.group:
return True
return False
def get(self):
'''return group information '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.group = Group(content=result['results'][0])
elif 'groups "{}" not found'.format(self.config.name) in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
return self._replace_content(self.kind, self.config.name, self.config.data)
def needs_update(self):
''' verify an update is needed '''
return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True)
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode=False):
'''run the idempotent ansible code'''
gconfig = GroupConfig(params['name'],
params['namespace'],
params['kubeconfig'],
)
oc_group = OCGroup(gconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_group.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': state}
########
# Delete
########
if state == 'absent':
if oc_group.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
api_rval = oc_group.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'state': state}
if state == 'present':
########
# Create
########
if not oc_group.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
# Create it here
api_rval = oc_group.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_group.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if oc_group.needs_update():
api_rval = oc_group.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_group.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
# -*- -*- -*- End included fragment: class/oc_group.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_group.py -*- -*- -*-
#pylint: disable=too-many-branches
def main():
'''
ansible oc module for group
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
namespace=dict(default='default', type='str'),
# adding users to a group is handled through the oc_users module
#users=dict(default=None, type='list'),
),
supports_check_mode=True,
)
rval = OCGroup.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
return module.fail_json(**rval)
return module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_group.py -*- -*- -*-
|
|
"""Provide functionality to interact with the vlc telnet interface."""
from __future__ import annotations
from datetime import datetime
from functools import wraps
from typing import Any, Callable, TypeVar, cast
from aiovlc.client import Client
from aiovlc.exceptions import AuthError, CommandError, ConnectError
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
from .const import DATA_AVAILABLE, DATA_VLC, DEFAULT_NAME, DEFAULT_PORT, DOMAIN, LOGGER
MAX_VOLUME = 500
SUPPORT_VLC = (
SUPPORT_CLEAR_PLAYLIST
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_SEEK
| SUPPORT_SHUFFLE_SET
| SUPPORT_STOP
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
Func = TypeVar("Func", bound=Callable[..., Any])
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the vlc platform."""
LOGGER.warning(
"Loading VLC media player Telnet integration via platform setup is deprecated; "
"Please remove it from your configuration"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the vlc platform."""
# CONF_NAME is only present in imported YAML.
name = entry.data.get(CONF_NAME) or DEFAULT_NAME
vlc = hass.data[DOMAIN][entry.entry_id][DATA_VLC]
available = hass.data[DOMAIN][entry.entry_id][DATA_AVAILABLE]
async_add_entities([VlcDevice(entry, vlc, name, available)], True)
def catch_vlc_errors(func: Func) -> Func:
"""Catch VLC errors."""
@wraps(func)
async def wrapper(self, *args: Any, **kwargs: Any) -> Any:
"""Catch VLC errors and modify availability."""
try:
await func(self, *args, **kwargs)
except CommandError as err:
LOGGER.error("Command error: %s", err)
except ConnectError as err:
# pylint: disable=protected-access
if self._available:
LOGGER.error("Connection error: %s", err)
self._available = False
return cast(Func, wrapper)
class VlcDevice(MediaPlayerEntity):
"""Representation of a vlc player."""
def __init__(
self, config_entry: ConfigEntry, vlc: Client, name: str, available: bool
) -> None:
"""Initialize the vlc device."""
self._config_entry = config_entry
self._name = name
self._volume: float | None = None
self._muted: bool | None = None
self._state: str | None = None
self._media_position_updated_at: datetime | None = None
self._media_position: int | None = None
self._media_duration: int | None = None
self._vlc = vlc
self._available = available
self._volume_bkp = 0.0
self._media_artist: str | None = None
self._media_title: str | None = None
config_entry_id = config_entry.entry_id
self._attr_unique_id = config_entry_id
self._attr_device_info = {
"name": name,
"identifiers": {(DOMAIN, config_entry_id)},
"manufacturer": "VideoLAN",
"entry_type": "service",
}
@catch_vlc_errors
async def async_update(self) -> None:
"""Get the latest details from the device."""
if not self._available:
try:
await self._vlc.connect()
except ConnectError as err:
LOGGER.debug("Connection error: %s", err)
return
try:
await self._vlc.login()
except AuthError:
LOGGER.debug("Failed to login to VLC")
self.hass.async_create_task(
self.hass.config_entries.async_reload(self._config_entry.entry_id)
)
return
self._state = STATE_IDLE
self._available = True
LOGGER.info("Connected to vlc host: %s", self._vlc.host)
status = await self._vlc.status()
LOGGER.debug("Status: %s", status)
self._volume = status.audio_volume / MAX_VOLUME
state = status.state
if state == "playing":
self._state = STATE_PLAYING
elif state == "paused":
self._state = STATE_PAUSED
else:
self._state = STATE_IDLE
if self._state != STATE_IDLE:
self._media_duration = (await self._vlc.get_length()).length
time_output = await self._vlc.get_time()
vlc_position = time_output.time
# Check if current position is stale.
if vlc_position != self._media_position:
self._media_position_updated_at = dt_util.utcnow()
self._media_position = vlc_position
info = await self._vlc.info()
data = info.data
LOGGER.debug("Info data: %s", data)
self._media_artist = data.get(0, {}).get("artist")
self._media_title = data.get(0, {}).get("title")
if not self._media_title:
# Fall back to filename.
if data_info := data.get("data"):
self._media_title = data_info["filename"]
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def volume_level(self) -> float | None:
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_VLC
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
@property
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist
@catch_vlc_errors
async def async_media_seek(self, position: float) -> None:
"""Seek the media to a specific location."""
await self._vlc.seek(round(position))
@catch_vlc_errors
async def async_mute_volume(self, mute: bool) -> None:
"""Mute the volume."""
assert self._volume is not None
if mute:
self._volume_bkp = self._volume
await self.async_set_volume_level(0)
else:
await self.async_set_volume_level(self._volume_bkp)
self._muted = mute
@catch_vlc_errors
async def async_set_volume_level(self, volume: float) -> None:
"""Set volume level, range 0..1."""
await self._vlc.set_volume(round(volume * MAX_VOLUME))
self._volume = volume
if self._muted and self._volume > 0:
# This can happen if we were muted and then see a volume_up.
self._muted = False
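# Illustrative note (not part of the original code): Home Assistant volume
# levels are 0..1 while the VLC telnet interface works in 0..MAX_VOLUME (500),
# so e.g. volume_level 0.5 maps to set_volume(250) here, and an audio_volume of
# 250 reported by status() maps back to 0.5 in async_update().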
@catch_vlc_errors
async def async_media_play(self) -> None:
"""Send play command."""
await self._vlc.play()
self._state = STATE_PLAYING
@catch_vlc_errors
async def async_media_pause(self) -> None:
"""Send pause command."""
status = await self._vlc.status()
if status.state != "paused":
# Make sure we're not already paused since VLCTelnet.pause() toggles
# pause.
await self._vlc.pause()
self._state = STATE_PAUSED
@catch_vlc_errors
async def async_media_stop(self) -> None:
"""Send stop command."""
await self._vlc.stop()
self._state = STATE_IDLE
@catch_vlc_errors
async def async_play_media(
self, media_type: str, media_id: str, **kwargs: Any
) -> None:
"""Play media from a URL or file."""
if media_type != MEDIA_TYPE_MUSIC:
LOGGER.error(
"Invalid media type %s. Only %s is supported",
media_type,
MEDIA_TYPE_MUSIC,
)
return
await self._vlc.add(media_id)
self._state = STATE_PLAYING
@catch_vlc_errors
async def async_media_previous_track(self) -> None:
"""Send previous track command."""
await self._vlc.prev()
@catch_vlc_errors
async def async_media_next_track(self) -> None:
"""Send next track command."""
await self._vlc.next()
@catch_vlc_errors
async def async_clear_playlist(self) -> None:
"""Clear players playlist."""
await self._vlc.clear()
@catch_vlc_errors
async def async_set_shuffle(self, shuffle: bool) -> None:
"""Enable/disable shuffle mode."""
shuffle_command = "on" if shuffle else "off"
await self._vlc.random(shuffle_command)
|
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
from tests import mock, unittest, BaseEnvVar
import botocore
from botocore.compat import six
from botocore.exceptions import ClientError, WaiterConfigError, WaiterError
from botocore.waiter import Waiter, WaiterModel, SingleWaiterConfig
from botocore.waiter import create_waiter_with_client
from botocore.waiter import NormalizedOperationMethod
from botocore.loaders import Loader
from botocore.model import ServiceModel
class TestWaiterModel(unittest.TestCase):
def setUp(self):
self.boiler_plate_config = {
'description': 'Waiter description',
'operation': 'HeadBucket',
'delay': 5,
'maxAttempts': 20,
}
def create_acceptor_function(self, for_config):
single_waiter = {
'acceptors': [for_config]
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
return config.acceptors[0].matcher_func
def test_waiter_version(self):
self.assertEqual(WaiterModel({'version': 2, 'waiters': {}}).version, 2)
def test_wont_load_missing_version_in_config(self):
# We only load waiter configs if we know for sure that we're
# loading version 2 of the format.
waiters = {
# Missing the 'version' key.
'waiters': {}
}
with self.assertRaises(WaiterConfigError):
WaiterModel(waiters)
def test_unsupported_waiter_version(self):
waiters = {
'version': 1,
'waiters': {}
}
with self.assertRaises(WaiterConfigError):
WaiterModel(waiters)
def test_waiter_names(self):
waiters = {
'version': 2,
'waiters': {
'BarWaiter': {},
'FooWaiter': {},
}
}
self.assertEqual(WaiterModel(waiters).waiter_names, ['BarWaiter',
'FooWaiter'])
def test_get_single_waiter_config(self):
single_waiter = {
'description': 'Waiter description',
'operation': 'HeadBucket',
'delay': 5,
'maxAttempts': 20,
'acceptors': [
{'state': 'success', 'matcher': 'status', 'expected': 200},
{'state': 'retry', 'matcher': 'status', 'expected': 404},
],
}
waiters = {
'version': 2,
'waiters': {
'BucketExists': single_waiter,
}
}
model = WaiterModel(waiters)
config = model.get_waiter('BucketExists')
self.assertEqual(config.operation, 'HeadBucket')
def test_get_waiter_does_not_exist(self):
waiters = {
'version': 2,
'waiters': {}
}
model = WaiterModel(waiters)
with self.assertRaises(ValueError):
model.get_waiter('UnknownWaiter')
def test_single_waiter_config_attributes(self):
single_waiter = {
'description': 'Waiter description',
'operation': 'HeadBucket',
'delay': 5,
'maxAttempts': 20,
'acceptors': [
],
}
config = SingleWaiterConfig(single_waiter)
self.assertEqual(config.description, 'Waiter description')
self.assertEqual(config.operation, 'HeadBucket')
self.assertEqual(config.delay, 5)
self.assertEqual(config.max_attempts, 20)
def test_single_waiter_acceptors_built_with_matcher_func(self):
# When the list of acceptors are requested, we actually will transform
# them into values that are easier to use.
single_waiter = {
'acceptors': [
{'state': 'success', 'matcher': 'status', 'expected': 200},
],
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
success_acceptor = config.acceptors[0]
self.assertEqual(success_acceptor.state, 'success')
self.assertEqual(success_acceptor.matcher, 'status')
self.assertEqual(success_acceptor.expected, 200)
self.assertTrue(callable(success_acceptor.matcher_func))
def test_single_waiter_acceptor_matches_jmespath(self):
single_waiter = {
'acceptors': [
{'state': 'success', 'matcher': 'path',
'argument': 'Table.TableStatus', 'expected': 'ACCEPTED'},
],
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
success_acceptor = config.acceptors[0].matcher_func
# success_acceptor is a callable that takes a response dict and returns
# True or False.
self.assertTrue(
success_acceptor({'Table': {'TableStatus': 'ACCEPTED'}}))
self.assertFalse(
success_acceptor({'Table': {'TableStatus': 'CREATING'}}))
def test_single_waiter_supports_status_code(self):
single_waiter = {
'acceptors': [
{'state': 'success', 'matcher': 'status',
'expected': 200}
],
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
success_acceptor = config.acceptors[0].matcher_func
self.assertTrue(
success_acceptor({'ResponseMetadata': {'HTTPStatusCode': 200}}))
self.assertFalse(
success_acceptor({'ResponseMetadata': {'HTTPStatusCode': 404}}))
def test_single_waiter_supports_error(self):
single_waiter = {
'acceptors': [
{'state': 'success', 'matcher': 'error',
'expected': 'DoesNotExistError'}
],
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
success_acceptor = config.acceptors[0].matcher_func
self.assertTrue(
success_acceptor({'Error': {'Code': 'DoesNotExistError'}}))
self.assertFalse(
success_acceptor({'Error': {'Code': 'DoesNotExistErorr'}}))
def test_unknown_matcher(self):
unknown_type = 'arbitrary_type'
single_waiter = {
'acceptors': [
{'state': 'success', 'matcher': unknown_type,
'expected': 'foo'}
]
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
with self.assertRaises(WaiterConfigError):
config.acceptors
def test_single_waiter_supports_path_all(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
self.assertTrue(
matches({'Tables': [{"State": "GOOD"}]}))
self.assertTrue(
matches({'Tables': [{"State": "GOOD"}, {"State": "GOOD"}]}))
def test_single_waiter_supports_path_any(self):
matches = self.create_acceptor_function(
for_config={'state': 'failure', 'matcher': 'pathAny',
'argument': 'Tables[].State', 'expected': 'FAIL'})
self.assertTrue(
matches({'Tables': [{"State": "FAIL"}]}))
self.assertTrue(
matches({'Tables': [{"State": "GOOD"}, {"State": "FAIL"}]}))
def test_waiter_handles_error_responses_with_path_matchers(self):
path_any = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAny',
'argument': 'length(Tables) > `0`',
'expected': True})
path_all = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'length(Tables) > `0`',
'expected': True})
path = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'path',
'argument': 'length(Tables) > `0`',
'expected': True})
self.assertFalse(path_any({'Error': {'Code': 'DoesNotExist'}}))
self.assertFalse(path_all({'Error': {'Code': 'DoesNotExist'}}))
self.assertFalse(path({'Error': {'Code': 'DoesNotExist'}}))
def test_single_waiter_does_not_match_path_all(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
self.assertFalse(
matches({'Tables': [{"State": "GOOD"}, {"State": "BAD"}]}))
self.assertFalse(
matches({'Tables': [{"State": "BAD"}, {"State": "GOOD"}]}))
self.assertFalse(
matches({'Tables': [{"State": "BAD"}, {"State": "BAD"}]}))
self.assertFalse(
matches({'Tables': []}))
self.assertFalse(
matches({'Tables': [{"State": "BAD"},
{"State": "BAD"},
{"State": "BAD"},
{"State": "BAD"}]}))
def test_path_all_missing_field(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
self.assertFalse(
matches({'Tables': [{"NotState": "GOOD"}, {"NotState": "BAD"}]}))
def test_path_all_matcher_does_not_receive_list(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
self.assertFalse(
matches({"NotTables": []}))
def test_single_waiter_supports_all_three_states(self):
single_waiter = {
'acceptors': [
{'state': 'success', 'matcher': 'error',
'expected': 'DoesNotExistError'},
{'state': 'success', 'matcher': 'status',
'expected': 200},
{'state': 'success', 'matcher': 'path',
'argument': 'Foo.Bar', 'expected': 'baz'},
],
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
acceptors = config.acceptors
# Each acceptors should be able to handle not matching
# any type of response.
matches_nothing = {}
self.assertFalse(acceptors[0].matcher_func(matches_nothing))
self.assertFalse(acceptors[1].matcher_func(matches_nothing))
self.assertFalse(acceptors[2].matcher_func(matches_nothing))
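# Illustrative sketch (not part of the original test suite): a minimal waiter
# config and how an acceptor's matcher_func is applied to a parsed response
# (the operation name and status code are made up):
#   config = SingleWaiterConfig({
#       'operation': 'HeadBucket', 'delay': 5, 'maxAttempts': 20,
#       'acceptors': [{'state': 'success', 'matcher': 'status', 'expected': 301}],
#   })
#   config.acceptors[0].matcher_func(
#       {'ResponseMetadata': {'HTTPStatusCode': 301}})    # -> True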
class TestWaitersObjects(unittest.TestCase):
def setUp(self):
pass
def client_responses_are(self, *responses, **kwargs):
operation_method = kwargs['for_operation']
operation_method.side_effect = responses
return operation_method
def create_waiter_config(self, operation='MyOperation',
delay=0, max_attempts=3,
acceptors=None):
if acceptors is None:
# Create some arbitrary acceptor that will never match.
acceptors = [{'state': 'success', 'matcher': 'status',
'expected': 1000}]
waiter_config = {
'operation': operation,
'delay': delay,
'maxAttempts': max_attempts,
'acceptors': acceptors
}
config = SingleWaiterConfig(waiter_config)
return config
def test_waiter_waits_until_acceptor_matches(self):
config = self.create_waiter_config(
max_attempts=3,
acceptors=[{'state': 'success', 'matcher': 'path',
'argument': 'Foo', 'expected': 'SUCCESS'}])
# Simulate the client having two calls that don't
# match followed by a third call that matches the
# acceptor.
operation_method = mock.Mock()
waiter = Waiter('MyWaiter', config, operation_method)
self.client_responses_are(
{'Foo': 'FAILURE'},
{'Foo': 'FAILURE'},
{'Foo': 'SUCCESS'},
for_operation=operation_method
)
waiter.wait()
self.assertEqual(operation_method.call_count, 3)
def test_waiter_matches_with_invalid_error_response(self):
# Verify that the call will not raise WaiterError
# because of 'Error' key in success response.
config = self.create_waiter_config(
max_attempts=3,
acceptors=[{'state': 'success', 'matcher': 'path',
'argument': 'Foo', 'expected': 'SUCCESS'}])
operation_method = mock.Mock()
waiter = Waiter('MyWaiter', config, operation_method)
self.client_responses_are(
{'Foo': 'SUCCESS', 'Error': 'foo'},
for_operation=operation_method
)
waiter.wait()
self.assertEqual(operation_method.call_count, 1)
def test_waiter_never_matches(self):
# Verify that a matcher will fail after max_attempts
# is exceeded.
config = self.create_waiter_config(max_attempts=3)
operation_method = mock.Mock()
self.client_responses_are(
{'Foo': 'FAILURE'},
{'Foo': 'FAILURE'},
{'Foo': 'FAILURE'},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
with self.assertRaises(WaiterError):
waiter.wait()
def test_unspecified_errors_stops_waiter(self):
# If a waiter receives an error response, then the
# waiter immediately stops.
config = self.create_waiter_config()
operation_method = mock.Mock()
self.client_responses_are(
# This is an unknown error that's not called out
# in any of the waiter config, so when the
# waiter encounters this response it will transition
# to the failure state.
{'Error': {'Code': 'UnknownError', 'Message': 'bad error'}},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
with self.assertRaises(WaiterError):
waiter.wait()
def test_last_response_available_on_waiter_error(self):
last_response = {'Error': {'Code': 'UnknownError', 'Message': 'bad error'}}
config = self.create_waiter_config()
operation_method = mock.Mock()
self.client_responses_are(last_response,
for_operation=operation_method)
waiter = Waiter('MyWaiter', config, operation_method)
with self.assertRaises(WaiterError) as e:
waiter.wait()
self.assertEqual(e.exception.last_response, last_response)
def test_unspecified_errors_propagate_error_code(self):
# If a waiter receives an error response, then the
# waiter should pass along the error code
config = self.create_waiter_config()
operation_method = mock.Mock()
error_code = 'error_message'
error_message = 'error_message'
self.client_responses_are(
# This is an unknown error that's not called out
# in any of the waiter config, so when the
# waiter encounters this response it will transition
# to the failure state.
{'Error': {'Code': error_code, 'Message': error_message}},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
with self.assertRaisesRegex(WaiterError, error_message):
waiter.wait()
def _assert_failure_state_error_raised(self, acceptors, responses, expected_msg):
config = self.create_waiter_config(
acceptors=acceptors)
operation_method = mock.Mock()
waiter = Waiter('MyWaiter', config, operation_method)
self.client_responses_are(
*responses,
for_operation=operation_method
)
with self.assertRaisesRegex(WaiterError, expected_msg):
waiter.wait()
def test_waiter_failure_state_error(self):
test_cases = [
(
[
{
'state': 'failure', 'matcher': 'path',
'argument': 'Foo', 'expected': 'FAILURE'
}
],
[{'Foo': 'FAILURE'}],
'FAILURE'
),
(
[
{
'state': 'failure', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'FAILURE'
}
],
[{'Tables': [{"State": "FAILURE"}]}],
'FAILURE'
),
(
[
{
'state': 'failure', 'matcher': 'pathAny',
'argument': 'Tables[].State', 'expected': 'FAILURE'
}
],
[{'Tables': [{"State": "FAILURE"}]}],
'FAILURE'
),
(
[{'state': 'failure', 'matcher': 'status', 'expected': 404}],
[{'ResponseMetadata': {'HTTPStatusCode': 404}}],
'404'
),
(
[{'state': 'failure', 'matcher': 'error', 'expected': 'FailError'}],
[{'Error': {'Code': 'FailError', 'Message': 'foo'}}],
'FailError'
),
(
[{'state': 'retry', 'matcher': 'error', 'expected': 'RetryMe'}],
[{'Success': False}]*4,
'Max attempts exceeded'
),
(
[
{'state': 'success', 'matcher': 'status', 'expected': 200},
{'state': 'retry', 'matcher': 'error', 'expected': 'RetryMe'},
],
[
{'Success': False},
{'Error': {'Code': 'RetryMe', 'Message': 'foo'}},
{'Success': False},
{'Success': False},
],
'Previously accepted state'
),
]
for acceptors, responses, expected_msg in test_cases:
self._assert_failure_state_error_raised(acceptors, responses, expected_msg)
def test_waiter_transitions_to_failure_state(self):
acceptors = [
# A success state that will never be hit.
{'state': 'success', 'matcher': 'status', 'expected': 1000},
{'state': 'failure', 'matcher': 'error', 'expected': 'FailError'},
]
config = self.create_waiter_config(acceptors=acceptors)
operation_method = mock.Mock()
self.client_responses_are(
{'Nothing': 'foo'},
# And on the second attempt, a FailError is seen, which
# causes the waiter to fail fast.
{'Error': {'Code': 'FailError', 'Message': 'foo'}},
{'WillNeverGetCalled': True},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
with self.assertRaises(WaiterError):
waiter.wait()
# Not only should we raise an exception, but we should have
# only called the operation_method twice because the second
# response triggered a fast fail.
self.assertEqual(operation_method.call_count, 2)
def test_waiter_handles_retry_state(self):
acceptor_with_retry_state = [
{'state': 'success', 'matcher': 'status', 'expected': 200},
{'state': 'retry', 'matcher': 'error', 'expected': 'RetryMe'},
]
config = self.create_waiter_config(
acceptors=acceptor_with_retry_state)
operation_method = mock.Mock()
self.client_responses_are(
{'Nothing': 'foo'},
{'Error': {'Code': 'RetryMe', 'Message': 'foo'}},
{'Success': True,
'ResponseMetadata': {'HTTPStatusCode': 200}},
{'NeverCalled': True},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
waiter.wait()
self.assertEqual(operation_method.call_count, 3)
def test_kwargs_are_passed_through(self):
acceptors = [
{'state': 'success', 'matcher': 'error', 'expected': 'MyError'},
]
config = self.create_waiter_config(acceptors=acceptors)
operation_method = mock.Mock()
self.client_responses_are(
{'Error': {'Code': 'MyError'}},
for_operation=operation_method)
waiter = Waiter('MyWaiter', config, operation_method)
waiter.wait(Foo='foo', Bar='bar', Baz='baz')
operation_method.assert_called_with(Foo='foo', Bar='bar',
Baz='baz')
@mock.patch('time.sleep')
def test_waiter_honors_delay_time_between_retries(self, sleep_mock):
delay_time = 5
config = self.create_waiter_config(delay=delay_time)
operation_method = mock.Mock()
self.client_responses_are(
# This is an unknown error that's not called out
# in any of the waiter config, so when the
# waiter encounters this response it will transition
# to the failure state.
{'Success': False},
{'Success': False},
{'Success': False},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
with self.assertRaises(WaiterError):
waiter.wait()
# We attempt three times, which means we need to sleep
# twice, once before each subsequent request.
self.assertEqual(sleep_mock.call_count, 2)
sleep_mock.assert_called_with(delay_time)
@mock.patch('time.sleep')
def test_waiter_invocation_config_honors_delay(self, sleep_mock):
config = self.create_waiter_config()
operation_method = mock.Mock()
self.client_responses_are(
{'Success': False},
{'Success': False},
{'Success': False},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
custom_delay = 3
with self.assertRaises(WaiterError):
waiter.wait(WaiterConfig={'Delay': custom_delay})
# We attempt three times, which means we need to sleep
# twice, once before each subsequent request.
self.assertEqual(sleep_mock.call_count, 2)
sleep_mock.assert_called_with(custom_delay)
def test_waiter_invocation_config_honors_max_attempts(self):
config = self.create_waiter_config()
operation_method = mock.Mock()
self.client_responses_are(
{'Success': False},
{'Success': False},
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
custom_max = 2
with self.assertRaises(WaiterError):
waiter.wait(WaiterConfig={'MaxAttempts': custom_max})
self.assertEqual(operation_method.call_count, 2)
class TestCreateWaiter(unittest.TestCase):
def setUp(self):
self.waiter_config = {
'version': 2,
'waiters': {
'WaiterName': {
'operation': 'Foo',
'delay': 1,
'maxAttempts': 1,
'acceptors': [],
},
},
}
self.waiter_model = WaiterModel(self.waiter_config)
self.service_json_model = {
'metadata': {
'serviceFullName': 'Amazon MyService'
},
'operations': {
'Foo': {
'name': 'Foo',
'input': {'shape': 'FooInputOutput'},
'output': {'shape': 'FooInputOutput'}
}
},
'shapes': {
'FooInputOutput': {
'type': 'structure',
'members': {
'bar': {
'shape': 'String',
'documentation': 'Documents bar'
}
}
},
'String': {
'type': 'string'
}
}
}
self.service_model = ServiceModel(self.service_json_model, 'myservice')
self.client = mock.Mock()
self.client.meta.service_model = self.service_model
def test_can_create_waiter_from_client(self):
waiter_name = 'WaiterName'
waiter = create_waiter_with_client(
waiter_name, self.waiter_model, self.client)
self.assertIsInstance(waiter, Waiter)
def test_waiter_class_name(self):
waiter_name = 'WaiterName'
waiter = create_waiter_with_client(
waiter_name, self.waiter_model, self.client)
self.assertEqual(
waiter.__class__.__name__,
'MyService.Waiter.WaiterName'
)
def test_waiter_help_documentation(self):
waiter_name = 'WaiterName'
waiter = create_waiter_with_client(
waiter_name, self.waiter_model, self.client)
with mock.patch('sys.stdout', six.StringIO()) as mock_stdout:
help(waiter.wait)
content = mock_stdout.getvalue()
lines = [
(' Polls :py:meth:`MyService.Client.foo` every 1 '
'seconds until a successful state is reached. An error '
'is returned after 1 failed checks.'),
' **Request Syntax** ',
' ::',
' waiter.wait(',
" bar='string'",
' )',
' :type bar: string',
' :param bar: Documents bar',
' :returns: None',
]
for line in lines:
self.assertIn(line, content)
class TestOperationMethods(unittest.TestCase):
def test_normalized_op_method_makes_call(self):
client_method = mock.Mock()
op = NormalizedOperationMethod(client_method)
op(Foo='a', Bar='b')
client_method.assert_called_with(Foo='a', Bar='b')
def test_normalized_op_returns_error_response(self):
# Client objects normally throw exceptions when an error
# occurs, but we need to return the parsed error response.
client_method = mock.Mock()
op = NormalizedOperationMethod(client_method)
parsed_response = {
'Error': {'Code': 'Foo', 'Message': 'bar'}
}
exception = ClientError(parsed_response, 'OperationName')
client_method.side_effect = exception
actual_response = op(Foo='a', Bar='b')
self.assertEqual(actual_response, parsed_response)
class ServiceWaiterFunctionalTest(BaseEnvVar):
"""
This class is used as a base class if you want to functionally test the
waiters for a specific service.
"""
def setUp(self):
super(ServiceWaiterFunctionalTest, self).setUp()
self.data_path = os.path.join(
os.path.dirname(botocore.__file__), 'data')
self.environ['AWS_DATA_PATH'] = self.data_path
self.loader = Loader([self.data_path])
def get_waiter_model(self, service, api_version=None):
"""Get the waiter model for the service."""
with mock.patch('botocore.loaders.Loader.list_available_services',
return_value=[service]):
return WaiterModel(self.loader.load_service_model(
service, type_name='waiters-2', api_version=api_version))
def get_service_model(self, service, api_version=None):
"""Get the service model for the service."""
with mock.patch('botocore.loaders.Loader.list_available_services',
return_value=[service]):
return ServiceModel(
self.loader.load_service_model(
service, type_name='service-2', api_version=api_version),
service_name=service
)
class CloudFrontWaitersTest(ServiceWaiterFunctionalTest):
def setUp(self):
super(CloudFrontWaitersTest, self).setUp()
self.client = mock.Mock()
self.service = 'cloudfront'
self.old_api_versions = ['2014-05-31']
def assert_distribution_deployed_call_count(self, api_version=None):
waiter_name = 'DistributionDeployed'
waiter_model = self.get_waiter_model(self.service, api_version)
self.client.meta.service_model = self.get_service_model(
self.service, api_version)
self.client.get_distribution.side_effect = [
{'Distribution': {'Status': 'Deployed'}}
]
waiter = create_waiter_with_client(waiter_name, waiter_model,
self.client)
waiter.wait()
self.assertEqual(self.client.get_distribution.call_count, 1)
def assert_invalidation_completed_call_count(self, api_version=None):
waiter_name = 'InvalidationCompleted'
waiter_model = self.get_waiter_model(self.service, api_version)
self.client.meta.service_model = self.get_service_model(
self.service, api_version)
self.client.get_invalidation.side_effect = [
{'Invalidation': {'Status': 'Completed'}}
]
waiter = create_waiter_with_client(waiter_name, waiter_model,
self.client)
waiter.wait()
self.assertEqual(self.client.get_invalidation.call_count, 1)
def assert_streaming_distribution_deployed_call_count(
self, api_version=None):
waiter_name = 'StreamingDistributionDeployed'
waiter_model = self.get_waiter_model(self.service, api_version)
self.client.meta.service_model = self.get_service_model(
self.service, api_version)
self.client.get_streaming_distribution.side_effect = [
{'StreamingDistribution': {'Status': 'Deployed'}}
]
waiter = create_waiter_with_client(waiter_name, waiter_model,
self.client)
waiter.wait()
self.assertEqual(self.client.get_streaming_distribution.call_count, 1)
def test_distribution_deployed(self):
# Test the latest version.
self.assert_distribution_deployed_call_count()
self.client.reset_mock()
# Test previous api versions.
for api_version in self.old_api_versions:
self.assert_distribution_deployed_call_count(api_version)
self.client.reset_mock()
def test_invalidation_completed(self):
# Test the latest version.
self.assert_invalidation_completed_call_count()
self.client.reset_mock()
# Test previous api versions.
for api_version in self.old_api_versions:
self.assert_invalidation_completed_call_count(api_version)
self.client.reset_mock()
def test_streaming_distribution_deployed(self):
# Test the latest version.
self.assert_streaming_distribution_deployed_call_count()
self.client.reset_mock()
# Test previous api versions.
for api_version in self.old_api_versions:
self.assert_streaming_distribution_deployed_call_count(api_version)
self.client.reset_mock()
|
|
import os
import re
import sys
## used for nt reverse complements
_nt_comp_table = bytes.maketrans(b'ACBDGHKMNSRUTWVYacbdghkmnsrutwvy', \
b'TGVHCDMKNSYAAWBRtgvhcdmknsyaawbr')
# for translations: http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
_translation_table = {
1: {
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', 'AGA':'R', 'AGG':'R',
'AAC':'N', 'AAT':'N',
'GAC':'D', 'GAT':'D',
'TGC':'C', 'TGT':'C',
'GAA':'E', 'GAG':'E',
'CAA':'Q', 'CAG':'Q',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'CAC':'H', 'CAT':'H',
'ATA':'I', 'ATC':'I', 'ATT':'I',
'TTA':'L', 'TTG':'L', 'CTA':'L', 'CTC':'L', 'CTG':'L','CTT':'L',
'AAA':'K', 'AAG':'K',
'ATG':'M',
'TTC':'F', 'TTT':'F',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'AGC':'S', 'AGT':'S',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'TGG':'W',
'TAC':'Y', 'TAT':'Y',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'TAA':'*', 'TAG':'*', 'TGA':'*'
}
}
def translate( seq, translation_table=None ):
"""
Does a direct translation of the passed DNA/RNA sequence in phase 0.
You can pass a numeric translation table, else 1 is assumed.
An 'X' is used when a codon is unknown (contains an N)
http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
"""
if translation_table is None:
translation_table = 1
# make sure we've defined this translation table
if translation_table not in _translation_table:
raise Exception("ERROR: translation requested using table {0}, which isn't yet supported.".format(translation_table))
trans_table = _translation_table[translation_table]
# In case an RNA string was passed
seq = seq.translate(seq.maketrans('Uutagc', 'TTTAGC'))
polypeptide_seq = ''
x = 0
while True:
try:
polypeptide_seq += trans_table[seq[x:x+3]]
except (IndexError):
break
except (KeyError):
if len(seq[x:x+3]) == 3:
#raise Exception("ERROR: Encountered unknown codon during translation: {0}".format(seq[x:x+3]))
print("WARN: Encountered unknown codon during translation: {0}".format(seq[x:x+3]))
polypeptide_seq += 'X'
else:
break
x += 3
return polypeptide_seq
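# Illustrative usage (not part of the original module), assuming the standard
# translation table (1):
#   translate('ATGGCCTAA')   # -> 'MA*'
#   translate('ATGGCNTAA')   # -> 'MX*'  (the ambiguous codon GCN becomes X, with a warning)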
def read_list_file( file ):
"""Parse an list file and return an array of the paths"""
files = []
if ( not os.path.isfile(file) ):
raise Exception("Couldn't find file: " + file)
## only do non-blank lines
with open(file) as f_in:
lines = filter(None, (line.rstrip() for line in f_in))
for line in lines:
files.append(line)
return files
def reverse_complement( seq ):
"""
Biological reverse complementation. Case in sequences is retained, and
IUPAC codes are supported. Code modified from:
http://shootout.alioth.debian.org/u32/program.php?test=revcomp&lang=python3&id=4
"""
return seq.translate(_nt_comp_table)[::-1]
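# Illustrative usage (not part of the original module):
#   reverse_complement('ATGCatgc')   # -> 'gcatGCAT'  (case is preserved)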
def humancoords_to_0interbase( start, stop ):
"""
The typical human-readable coordinate system, such as found in GBK flat files,
has a start and stop coordinate only. They are 1-based, on-base coordinates
and features on a reverse strand are indicated by having start > stop. This
transforms them into the GMOD standard 0-based inter-base coordinates.
Returns a list of fmin, fmax and strand values.
"""
fmin = start
fmax = stop
strand = 1
if ( stop < start ):
fmin = stop
fmax = start
strand = -1
fmin -= 1
return (fmin, fmax, strand)
def interbase0_to_humancoords( fmin, fmax, strand ):
"""
Takes GMOD-standard 0-based inter-base coordinates and transforms them
into the typical human-readable coordinates used in places like GBK flat
files. Features on the reverse strand have a larger start than stop.
The strand values can be:
+ or 1 for forward features
- or -1 for reverse features
Returns a list: [start, stop] where start < stop if on forward strand.
"""
if strand == '+' or strand == 1:
return (fmin + 1, fmax)
elif strand == '-' or strand == -1:
return (fmax, fmin + 1)
else:
raise Exception("Invalid strand specified ({0}). Expected +, -, 1 or -1".format(strand))
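# Illustrative usage (not part of the original module): a reverse-strand GBK
# feature written as start=10, stop=1 round-trips like this:
#   humancoords_to_0interbase(10, 1)      # -> (0, 10, -1)
#   interbase0_to_humancoords(0, 10, -1)  # -> (10, 1)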
def fasta_dict_from_file( file ):
"""
Reads a file of FASTA entries and returns a dict where each key is a sequence ID.
The value is another dict with two keys 'h' for header and 's' for sequence. The
header is all the other text after the id in the original FASTA header. The
sequence has all whitespace removed. Obviously this should only be used on files
where memory to load them isn't an issue.
"""
seqs = dict()
current_seq = ''
current_id = None
current_header = None
for line in open(file):
line = line.rstrip()
m = re.search(r'>(\S+)\s*(.*)', line)
if m:
## new residue line matched, purge the existing one, if not the first
if current_id is not None:
## warn if it has already been found
if current_id in seqs:
sys.stderr.write("WARN: Duplicate ID ({0}) found. Only last one kept.\n".format(current_id))
## remove all whitespace and save
current_seq = ''.join(current_seq.split())
seqs[current_id] = {'h':current_header, 's':current_seq}
current_seq = ''
current_id = m.group(1)
current_header = m.group(2)
else:
## python 2.6+ makes string concatenation amortized O(n)
## http://stackoverflow.com/a/4435752/1368079
current_seq += str(line)
## don't forget the last one
current_seq = ''.join(current_seq.split())
seqs[current_id] = {'h':current_header, 's':current_seq}
return seqs
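# Illustrative note (not part of the original module): for a FASTA entry like
#   >seq1 example entry
#   ACGTACGT
# fasta_dict_from_file() returns {'seq1': {'h': 'example entry', 's': 'ACGTACGT'}}.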
def fasta_sizes_from_file(file):
"""
Reads a file of FASTA entries and returns a dict where each key is a sequence ID and
the value is just the size of each sequence. This is almost as computationally
intensive as fasta_dict_from_file() but takes less memory and is appropriate when you
only care about the residue lengths.
"""
seqs = dict()
current_id = None
for line in open(file):
line = line.rstrip()
m = re.search(r'>(\S+)\s*(.*)', line)
if m:
## new residue line matched, set the new seq ID
current_id = m.group(1)
seqs[current_id] = 0
else:
seqs[current_id] += len(line)
return seqs
def add_assembly_fasta(mols, fasta_file):
"""
Takes a collection of molecules (Assembly) objects and adds sequence residues based on
their IDs being found in the passed FASTA file.
"""
fasta_seqs = fasta_dict_from_file(fasta_file)
for mol_id in mols:
# check if the FASTA file provides sequence for this
if mol_id in fasta_seqs:
mol = mols[mol_id]
mol.residues = fasta_seqs[mol_id]['s']
mol.length = len(mol.residues)
def wrapped_fasta(string, every=60):
"""
Pass a string of residues (nucleotide or polypeptides) that has NO whitespace and
this will return another string with new line characters inserted every N residues.
N is specified with the 'every' parameter with a default of 60. For example:
new_fasta = wrapped_fasta(some_string, every=60)
This runs orders of magnitude faster than using the textwrap module.
"""
return '\n'.join(string[i:i+every] for i in range(0, len(string), every))
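# Illustrative usage (not part of the original module):
#   wrapped_fasta('ACGTACGTACGT', every=5)   # -> 'ACGTA\nCGTAC\nGT'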
|
|
"""
(C) 2014-2019 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import sys
import logging
import json
import webbrowser
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from uuid import uuid1
from threading import Event, Semaphore
from webview import _debug, _user_agent, OPEN_DIALOG, FOLDER_DIALOG, SAVE_DIALOG, parse_file_type, escape_string, windows
from webview.util import parse_api_js, default_html, js_bridge_call
from webview.js.css import disable_text_select
from webview.screen import Screen
from webview.window import FixPoint
logger = logging.getLogger('pywebview')
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('WebKit2', '4.0')
from gi.repository import Gtk as gtk
from gi.repository import Gdk
from gi.repository import GLib as glib
from gi.repository import WebKit2 as webkit
# versions of WebKit2 older than 2.22 do not support returning a result of javascript, so we
# have to resort to fetching the result via the window title
webkit_ver = webkit.get_major_version(), webkit.get_minor_version(), webkit.get_micro_version()
old_webkit = webkit_ver[0] < 2 or webkit_ver[1] < 22
renderer = 'gtkwebkit2'
settings = {}
class BrowserView:
instances = {}
class JSBridge:
def __init__(self, window):
self.window = window
self.uid = uuid1().hex[:8]
def call(self, func_name, param, value_id):
if param == 'undefined':
param = None
return js_bridge_call(self.window, func_name, param, value_id)
def __init__(self, window):
BrowserView.instances[window.uid] = self
self.uid = window.uid
self.pywebview_window = window
self.is_fullscreen = False
self.js_results = {}
glib.threads_init()
self.window = gtk.Window(title=window.title)
self.shown = window.events.shown
self.loaded = window.events.loaded
self.localization = window.localization
self._last_width = window.initial_width
self._last_height = window.initial_height
if window.resizable:
self.window.set_size_request(window.min_size[0], window.min_size[1])
self.window.resize(window.initial_width, window.initial_height)
else:
self.window.set_size_request(window.initial_width, window.initial_height)
if window.minimized:
self.window.iconify()
if window.initial_x is not None and window.initial_y is not None:
self.move(window.initial_x, window.initial_y)
else:
self.window.set_position(gtk.WindowPosition.CENTER)
self.window.set_resizable(window.resizable)
# Set window background color
style_provider = gtk.CssProvider()
style_provider.load_from_data(
'GtkWindow {{ background-color: {}; }}'.format(window.background_color).encode()
)
gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
scrolled_window = gtk.ScrolledWindow()
self.window.add(scrolled_window)
if window.confirm_close:
self.window.connect('delete-event', self.on_destroy)
else:
self.window.connect('delete-event', self.close_window)
self.window.connect('window-state-event', self.on_window_state_change)
self.window.connect('size-allocate', self.on_window_resize)
self.js_bridge = BrowserView.JSBridge(window)
self.text_select = window.text_select
self.webview = webkit.WebView()
self.webview.connect('notify::visible', self.on_webview_ready)
self.webview.connect('load_changed', self.on_load_finish)
self.webview.connect('notify::title', self.on_title_change)
self.webview.connect('decide-policy', self.on_navigation)
user_agent = settings.get('user_agent') or _user_agent
if user_agent:
self.webview.get_settings().props.user_agent = user_agent
if window.frameless:
self.window.set_decorated(False)
if window.easy_drag:
self.move_progress = False
self.webview.connect('button-release-event', self.on_mouse_release)
self.webview.connect('button-press-event', self.on_mouse_press)
self.window.connect('motion-notify-event', self.on_mouse_move)
if window.on_top:
self.window.set_keep_above(True)
self.transparent = window.transparent
if window.transparent:
configure_transparency(self.window)
configure_transparency(self.webview)
wvbg = self.webview.get_background_color()
wvbg.alpha = 0.0
self.webview.set_background_color(wvbg)
if _debug['mode']:
self.webview.get_settings().props.enable_developer_extras = True
else:
self.webview.connect('context-menu', lambda a,b,c,d: True) # Disable context menu
self.webview.set_opacity(0.0)
scrolled_window.add(self.webview)
if window.real_url is not None:
self.webview.load_uri(window.real_url)
elif window.html:
self.webview.load_html(window.html, '')
else:
self.webview.load_html(default_html, '')
if window.fullscreen:
self.toggle_fullscreen()
def close_window(self, *data):
should_cancel = self.pywebview_window.events.closing.set()
if should_cancel:
return
for res in self.js_results.values():
res['semaphore'].release()
while gtk.events_pending():
gtk.main_iteration()
self.window.destroy()
del BrowserView.instances[self.uid]
if self.pywebview_window in windows:
windows.remove(self.pywebview_window)
self.pywebview_window.events.closed.set()
if BrowserView.instances == {}:
gtk.main_quit()
def on_destroy(self, widget=None, *data):
        dialog = gtk.MessageDialog(parent=self.window, flags=gtk.DialogFlags.MODAL | gtk.DialogFlags.DESTROY_WITH_PARENT,
type=gtk.MessageType.QUESTION, buttons=gtk.ButtonsType.OK_CANCEL,
message_format=self.localization['global.quitConfirmation'])
result = dialog.run()
if result == gtk.ResponseType.OK:
self.close_window()
dialog.destroy()
return True
def on_window_state_change(self, window, window_state):
if window_state.changed_mask == Gdk.WindowState.ICONIFIED:
if Gdk.WindowState.ICONIFIED & window_state.new_window_state == Gdk.WindowState.ICONIFIED:
self.pywebview_window.events.minimized.set()
else:
self.pywebview_window.events.restored.set()
elif window_state.changed_mask == Gdk.WindowState.MAXIMIZED:
if Gdk.WindowState.MAXIMIZED & window_state.new_window_state == Gdk.WindowState.MAXIMIZED:
self.pywebview_window.events.maximized.set()
else:
self.pywebview_window.events.restored.set()
def on_window_resize(self, window, allocation):
if allocation.width != self._last_width or allocation.height != self._last_height:
self._last_width = allocation.width
self._last_height = allocation.height
self.pywebview_window.events.resized.set(allocation.width, allocation.height)
def on_webview_ready(self, arg1, arg2):
        # In WebKit2, notify::visible can fire after the window has been closed and the
        # BrowserView object destroyed. For lack of a better solution, check that the
        # 'shown' attribute still exists before using it.
if 'shown' in dir(self):
self.shown.set()
def on_load_finish(self, webview, status):
# Show the webview if it's not already visible
if not webview.props.opacity:
glib.idle_add(webview.set_opacity, 1.0)
if status == webkit.LoadEvent.FINISHED:
if not self.text_select:
webview.run_javascript(disable_text_select)
self._set_js_api()
def on_title_change(self, webview, title):
title = webview.get_title()
try:
js_data = json.loads(title)
if 'type' not in js_data:
return
elif js_data['type'] == 'eval' and old_webkit: # return result of evaluate_js
unique_id = js_data['uid']
result = js_data['result'] if 'result' in js_data else None
js = self.js_results[unique_id]
js['result'] = result
js['semaphore'].release()
elif js_data['type'] == 'invoke': # invoke js api's function
func_name = js_data['function']
value_id = js_data['id']
param = js_data['param'] if 'param' in js_data else None
return_val = self.js_bridge.call(func_name, param, value_id)
# Give back the return value to JS as a string
code = 'pywebview._bridge.return_val = "{0}";'.format(escape_string(str(return_val)))
webview.run_javascript(code)
except ValueError: # Python 2
return
except json.JSONDecodeError: # Python 3
return
def on_navigation(self, webview, decision, decision_type):
if type(decision) == webkit.NavigationPolicyDecision:
uri = decision.get_request().get_uri()
if decision.get_frame_name() == '_blank':
webbrowser.open(uri, 2, True)
decision.ignore()
def on_mouse_release(self, sender, event):
self.move_progress = False
def on_mouse_press(self, _, event):
self.point_diff = [x - y for x, y in zip(self.window.get_position(), [event.x_root, event.y_root])]
self.move_progress = True
def on_mouse_move(self, _, event):
if self.move_progress:
point = [x + y for x, y in zip((event.x_root, event.y_root), self.point_diff)]
self.window.move(point[0], point[1])
def show(self):
self.window.show_all()
if gtk.main_level() == 0:
if self.pywebview_window.hidden:
self.window.hide()
gtk.main()
else:
glib.idle_add(self.window.show_all)
def hide(self):
glib.idle_add(self.window.hide)
def destroy(self):
self.window.emit('delete-event', Gdk.Event())
def set_title(self, title):
self.window.set_title(title)
def toggle_fullscreen(self):
if self.is_fullscreen:
self.window.unfullscreen()
else:
self.window.fullscreen()
self.is_fullscreen = not self.is_fullscreen
def resize(self, width, height, fix_point):
if fix_point & FixPoint.NORTH and fix_point & FixPoint.WEST:
self.window.set_gravity(Gdk.Gravity.NORTH_WEST)
elif fix_point & FixPoint.NORTH and fix_point & FixPoint.EAST:
self.window.set_gravity(Gdk.Gravity.NORTH_EAST)
elif fix_point & FixPoint.SOUTH and fix_point & FixPoint.EAST:
self.window.set_gravity(Gdk.Gravity.SOUTH_EAST)
elif fix_point & FixPoint.SOUTH and fix_point & FixPoint.WEST:
self.window.set_gravity(Gdk.Gravity.SOUTH_WEST)
elif fix_point & FixPoint.SOUTH:
self.window.set_gravity(Gdk.Gravity.SOUTH)
elif fix_point & FixPoint.NORTH:
self.window.set_gravity(Gdk.Gravity.NORTH)
elif fix_point & FixPoint.WEST:
self.window.set_gravity(Gdk.Gravity.WEST)
elif fix_point & FixPoint.EAST:
self.window.set_gravity(Gdk.Gravity.EAST)
self.window.resize(width, height)
def move(self, x, y):
self.window.move(x, y)
def minimize(self):
glib.idle_add(self.window.iconify)
def restore(self):
def _restore():
self.window.deiconify()
self.window.present()
glib.idle_add(_restore)
def create_file_dialog(self, dialog_type, directory, allow_multiple, save_filename, file_types):
if dialog_type == FOLDER_DIALOG:
gtk_dialog_type = gtk.FileChooserAction.SELECT_FOLDER
title = self.localization['linux.openFolder']
button = gtk.STOCK_OPEN
elif dialog_type == OPEN_DIALOG:
gtk_dialog_type = gtk.FileChooserAction.OPEN
if allow_multiple:
title = self.localization['linux.openFiles']
else:
title = self.localization['linux.openFile']
button = gtk.STOCK_OPEN
elif dialog_type == SAVE_DIALOG:
gtk_dialog_type = gtk.FileChooserAction.SAVE
title = self.localization['global.saveFile']
button = gtk.STOCK_SAVE
dialog = gtk.FileChooserDialog(title, self.window, gtk_dialog_type,
(gtk.STOCK_CANCEL, gtk.ResponseType.CANCEL, button, gtk.ResponseType.OK))
dialog.set_select_multiple(allow_multiple)
dialog.set_current_folder(directory)
self._add_file_filters(dialog, file_types)
if dialog_type == SAVE_DIALOG:
dialog.set_current_name(save_filename)
response = dialog.run()
if response == gtk.ResponseType.OK:
if dialog_type == SAVE_DIALOG:
file_name = dialog.get_filename()
else:
file_name = dialog.get_filenames()
else:
file_name = None
dialog.destroy()
return file_name
def _add_file_filters(self, dialog, file_types):
for s in file_types:
description, extensions = parse_file_type(s)
f = gtk.FileFilter()
f.set_name(description)
for e in extensions.split(';'):
f.add_pattern(e)
dialog.add_filter(f)
def get_current_url(self):
self.loaded.wait()
uri = self.webview.get_uri()
return uri if uri != 'about:blank' else None
def load_url(self, url):
self.loaded.clear()
self.webview.load_uri(url)
def load_html(self, content, base_uri):
self.loaded.clear()
self.webview.load_html(content, base_uri)
    def evaluate_js(self, script):
        # Modern WebKit2 delivers the result through run_javascript's async callback;
        # old WebKit has no such API, so the script is wrapped to store its result in
        # document.title, which on_title_change then picks up.
        def _evaluate_js():
callback = None if old_webkit else _callback
self.webview.run_javascript(script, None, callback, None)
def _callback(webview, task, data):
value = webview.run_javascript_finish(task)
result = value.get_js_value().to_string() if value else None
if unique_id in self.js_results:
self.js_results[unique_id]['result'] = result
result_semaphore.release()
unique_id = uuid1().hex
result_semaphore = Semaphore(0)
self.js_results[unique_id] = {'semaphore': result_semaphore, 'result': None}
if old_webkit:
script = 'document.title = JSON.stringify({{"type": "eval", "uid": "{0}", "result": {1}}})'.format(unique_id, script)
self.loaded.wait()
glib.idle_add(_evaluate_js)
result_semaphore.acquire()
if not gtk.main_level():
# Webview has been closed, don't proceed
return None
result = self.js_results[unique_id]['result']
result = None if result == 'undefined' or result == 'null' or result is None else result if result == '' else json.loads(result)
del self.js_results[unique_id]
return result
def _set_js_api(self):
def create_bridge():
self.webview.run_javascript(parse_api_js(self.js_bridge.window, 'gtk', uid=self.js_bridge.uid))
self.loaded.set()
glib.idle_add(create_bridge)
def create_window(window):
def create():
browser = BrowserView(window)
browser.show()
if window.uid == 'master':
create()
else:
glib.idle_add(create)
def set_title(title, uid):
def _set_title():
BrowserView.instances[uid].set_title(title)
glib.idle_add(_set_title)
def destroy_window(uid):
def _destroy_window():
BrowserView.instances[uid].close_window()
glib.idle_add(_destroy_window)
def toggle_fullscreen(uid):
def _toggle_fullscreen():
BrowserView.instances[uid].toggle_fullscreen()
glib.idle_add(_toggle_fullscreen)
def set_on_top(uid, top):
def _set_on_top():
BrowserView.instances[uid].window.set_keep_above(top)
glib.idle_add(_set_on_top)
def resize(width, height, uid, fix_point):
def _resize():
BrowserView.instances[uid].resize(width, height, fix_point)
glib.idle_add(_resize)
def move(x, y, uid):
def _move():
BrowserView.instances[uid].move(x, y)
glib.idle_add(_move)
def hide(uid):
glib.idle_add(BrowserView.instances[uid].hide)
def show(uid):
glib.idle_add(BrowserView.instances[uid].show)
def minimize(uid):
glib.idle_add(BrowserView.instances[uid].minimize)
def restore(uid):
glib.idle_add(BrowserView.instances[uid].restore)
def get_current_url(uid):
def _get_current_url():
result['url'] = BrowserView.instances[uid].get_current_url()
semaphore.release()
result = {}
semaphore = Semaphore(0)
glib.idle_add(_get_current_url)
semaphore.acquire()
return result['url']
def load_url(url, uid):
def _load_url():
BrowserView.instances[uid].load_url(url)
glib.idle_add(_load_url)
def load_html(content, base_uri, uid):
def _load_html():
BrowserView.instances[uid].load_html(content, base_uri)
glib.idle_add(_load_html)
def create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_types, uid):
i = BrowserView.instances[uid]
file_name_semaphore = Semaphore(0)
file_names = []
def _create():
result = i.create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_types)
if result is None:
file_names.append(None)
else:
file_names.append(tuple(result))
file_name_semaphore.release()
glib.idle_add(_create)
file_name_semaphore.acquire()
return file_names[0]
def evaluate_js(script, uid):
return BrowserView.instances[uid].evaluate_js(script)
def get_position(uid):
def _get_position():
result['position'] = BrowserView.instances[uid].window.get_position()
semaphore.release()
result = {}
semaphore = Semaphore(0)
glib.idle_add(_get_position)
semaphore.acquire()
return result['position']
def get_size(uid):
def _get_size():
result['size'] = BrowserView.instances[uid].window.get_size()
semaphore.release()
result = {}
semaphore = Semaphore(0)
glib.idle_add(_get_size)
semaphore.acquire()
return result['size']
def get_screens():
screen = Gdk.Screen.get_default()
n = screen.get_n_monitors()
geometries = [screen.get_monitor_geometry(i) for i in range(n)]
screens = [Screen(geom.width, geom.height) for geom in geometries]
return screens
def configure_transparency(c):
c.set_visual(c.get_screen().get_rgba_visual())
c.override_background_color(gtk.StateFlags.ACTIVE, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.BACKDROP, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.DIR_LTR, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.DIR_RTL, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.FOCUSED, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.INCONSISTENT, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.INSENSITIVE, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.NORMAL, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.PRELIGHT, Gdk.RGBA(0, 0, 0, 0))
c.override_background_color(gtk.StateFlags.SELECTED, Gdk.RGBA(0, 0, 0, 0))
transparentWindowStyleProvider = gtk.CssProvider()
transparentWindowStyleProvider.load_from_data(b"""
GtkWindow {
background-color:rgba(0,0,0,0);
background-image:none;
}""")
c.get_style_context().add_provider(transparentWindowStyleProvider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
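# Minimal sketch (not part of pywebview) of the marshalling pattern used above by
# get_current_url/get_position/get_size and the module-level wrappers: the work is
# queued onto the GTK main loop with glib.idle_add while the calling thread blocks
# on a Semaphore until the result has been filled in. It relies on the same `glib`
# and `Semaphore` names this module already imports; the helper name is illustrative.
def _run_on_gtk_thread(func, *args):
    result = {}
    done = Semaphore(0)
    def _invoke():
        result['value'] = func(*args)
        done.release()
        return False  # returning False removes the idle callback after one run
    glib.idle_add(_invoke)
    done.acquire()  # block the caller until the GTK main loop has executed _invoke
    return result['value']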
|
|
# -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import random
import time
import uuid
import argparse
from flask import Flask, Response, request, render_template, redirect, jsonify as flask_jsonify, make_response, url_for
from werkzeug.datastructures import WWWAuthenticate, MultiDict
from werkzeug.http import http_date
from werkzeug.wrappers import BaseResponse
from six.moves import range as xrange
from . import filters
from .helpers import get_headers, status_code, get_dict, get_request_range, check_basic_auth, check_digest_auth, secure_cookie, H, ROBOT_TXT, ANGRY_ASCII
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
ENV_COOKIES = (
'_gauges_unique',
'_gauges_unique_year',
'_gauges_unique_month',
'_gauges_unique_day',
'_gauges_unique_hour',
'__utmz',
'__utma',
'__utmb'
)
def jsonify(*args, **kwargs):
response = flask_jsonify(*args, **kwargs)
if not response.data.endswith(b'\n'):
response.data += b'\n'
return response
# Prevent WSGI from correcting the casing of the Location header
BaseResponse.autocorrect_location_header = False
# Find the correct template folder when running from a different location
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
# Set up Bugsnag exception tracking, if desired. To use Bugsnag, install the
# Bugsnag Python client with the command "pip install bugsnag", and set the
# environment variable BUGSNAG_API_KEY. You can also optionally set
# BUGSNAG_RELEASE_STAGE.
if os.environ.get("BUGSNAG_API_KEY") is not None:
try:
import bugsnag
import bugsnag.flask
release_stage = os.environ.get("BUGSNAG_RELEASE_STAGE") or "production"
bugsnag.configure(api_key=os.environ.get("BUGSNAG_API_KEY"),
project_root=os.path.dirname(os.path.abspath(__file__)),
use_ssl=True, release_stage=release_stage,
ignore_classes=['werkzeug.exceptions.NotFound'])
bugsnag.flask.handle_exceptions(app)
except:
app.logger.warning("Unable to initialize Bugsnag exception handling.")
# -----------
# Middlewares
# -----------
@app.after_request
def set_cors_headers(response):
response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', '*')
response.headers['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
# Both of these headers are only used for the "preflight request"
# http://www.w3.org/TR/cors/#access-control-allow-methods-response-header
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, PATCH, OPTIONS'
response.headers['Access-Control-Max-Age'] = '3600' # 1 hour cache
if request.headers.get('Access-Control-Request-Headers') is not None:
response.headers['Access-Control-Allow-Headers'] = request.headers['Access-Control-Request-Headers']
return response
# ------
# Routes
# ------
@app.route('/')
def view_landing_page():
"""Generates Landing Page."""
tracking_enabled = 'HTTPBIN_TRACKING' in os.environ
return render_template('index.html', tracking_enabled=tracking_enabled)
@app.route('/html')
def view_html_page():
"""Simple Html Page"""
return render_template('moby.html')
@app.route('/robots.txt')
def view_robots_page():
"""Simple Html Page"""
response = make_response()
response.data = ROBOT_TXT
response.content_type = "text/plain"
return response
@app.route('/deny')
def view_deny_page():
"""Simple Html Page"""
response = make_response()
response.data = ANGRY_ASCII
response.content_type = "text/plain"
return response
# return "YOU SHOULDN'T BE HERE"
@app.route('/ip')
def view_origin():
"""Returns Origin IP."""
return jsonify(origin=request.headers.get('X-Forwarded-For', request.remote_addr))
@app.route('/headers')
def view_headers():
"""Returns HTTP HEADERS."""
return jsonify(get_dict('headers'))
@app.route('/user-agent')
def view_user_agent():
"""Returns User-Agent."""
headers = get_headers()
return jsonify({'user-agent': headers['user-agent']})
@app.route('/get', methods=('GET',))
def view_get():
"""Returns GET Data."""
return jsonify(get_dict('url', 'args', 'headers', 'origin'))
@app.route('/post', methods=('POST',))
def view_post():
"""Returns POST Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/put', methods=('PUT',))
def view_put():
"""Returns PUT Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/patch', methods=('PATCH',))
def view_patch():
"""Returns PATCH Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/delete', methods=('DELETE',))
def view_delete():
"""Returns DELETE Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content():
"""Returns GZip-Encoded Data."""
return jsonify(get_dict(
'origin', 'headers', method=request.method, gzipped=True))
@app.route('/deflate')
@filters.deflate
def view_deflate_encoded_content():
"""Returns Deflate-Encoded Data."""
return jsonify(get_dict(
'origin', 'headers', method=request.method, deflated=True))
@app.route('/redirect/<int:n>')
def redirect_n_times(n):
"""302 Redirects n times."""
assert n > 0
absolute = request.args.get('absolute', 'false').lower() == 'true'
if n == 1:
return redirect(url_for('view_get', _external=absolute))
if absolute:
return _redirect('absolute', n, True)
else:
return _redirect('relative', n, False)
def _redirect(kind, n, external):
return redirect(url_for('{0}_redirect_n_times'.format(kind), n=n - 1, _external=external))
@app.route('/redirect-to', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'])
def redirect_to():
"""302/3XX Redirects to the given URL."""
args = CaseInsensitiveDict(request.args.items())
# We need to build the response manually and convert to UTF-8 to prevent
# werkzeug from "fixing" the URL. This endpoint should set the Location
# header to the exact string supplied.
response = app.make_response('')
response.status_code = 302
if 'status_code' in args:
status_code = int(args['status_code'])
if status_code >= 300 and status_code < 400:
response.status_code = status_code
response.headers['Location'] = args['url'].encode('utf-8')
return response
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(n):
"""302 Redirects n times."""
assert n > 0
response = app.make_response('')
response.status_code = 302
if n == 1:
response.headers['Location'] = url_for('view_get')
return response
response.headers['Location'] = url_for('relative_redirect_n_times', n=n - 1)
return response
@app.route('/absolute-redirect/<int:n>')
def absolute_redirect_n_times(n):
"""302 Redirects n times."""
assert n > 0
if n == 1:
return redirect(url_for('view_get', _external=True))
return _redirect('absolute', n, True)
@app.route('/stream/<int:n>')
def stream_n_messages(n):
"""Stream n JSON messages"""
response = get_dict('url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response) + '\n'
return Response(generate_stream(), headers={
"Content-Type": "application/json",
})
@app.route('/status/<codes>', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'])
def view_status_code(codes):
"""Return status code or random status code if more than one are given"""
if ',' not in codes:
code = int(codes)
return status_code(code)
choices = []
for choice in codes.split(','):
if ':' not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(':')
choices.append((int(code), float(weight)))
code = weighted_choice(choices)
return status_code(code)
@app.route('/response-headers')
def response_headers():
"""Returns a set of response headers from the query string """
headers = MultiDict(request.args.items(multi=True))
response = jsonify(list(headers.lists()))
    # Re-serialize until the body stops changing: adding the requested headers alters
    # Content-Length, which must itself be reflected inside the JSON body.
    while True:
original_data = response.data
d = {}
for key in response.headers.keys():
value = response.headers.get_all(key)
if len(value) == 1:
value = value[0]
d[key] = value
response = jsonify(d)
for key, value in headers.items(multi=True):
response.headers.add(key, value)
response_has_changed = response.data != original_data
if not response_has_changed:
break
return response
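# Illustrative check (not part of httpbin, name is hypothetical) of the behaviour above:
# requested headers are reflected both on the response and inside the JSON body,
# including repeated keys, once the re-serialization loop has converged.
def _demo_response_headers():
    client = app.test_client()
    resp = client.get('/response-headers?X-Demo=1&X-Demo=2')
    assert resp.headers.get_all('X-Demo') == ['1', '2']
    assert json.loads(resp.data.decode('utf-8'))['X-Demo'] == ['1', '2']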
@app.route('/cookies')
def view_cookies(hide_env=True):
"""Returns cookie data."""
cookies = dict(request.cookies.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_COOKIES:
try:
del cookies[key]
except KeyError:
pass
return jsonify(cookies=cookies)
@app.route('/forms/post')
def view_forms_post():
"""Simple HTML form."""
return render_template('forms-post.html')
@app.route('/cookies/set/<name>/<value>')
def set_cookie(name, value):
"""Sets a cookie and redirects to cookie list."""
r = app.make_response(redirect(url_for('view_cookies')))
r.set_cookie(key=name, value=value, secure=secure_cookie())
return r
@app.route('/cookies/set')
def set_cookies():
"""Sets cookie(s) as provided by the query string and redirects to cookie list."""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.set_cookie(key=key, value=value, secure=secure_cookie())
return r
@app.route('/cookies/delete')
def delete_cookies():
"""Deletes cookie(s) as provided by the query string and redirects to cookie list."""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.delete_cookie(key=key)
return r
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth_md5(qop=None, user='user', passwd='passwd'):
return digest_auth(qop, user, passwd, "MD5")
@app.route('/digest-auth/<qop>/<user>/<passwd>/<algorithm>')
def digest_auth(qop=None, user='user', passwd='passwd', algorithm='MD5'):
"""Prompts the user for authorization using HTTP Digest auth"""
if algorithm not in ('MD5', 'SHA-256'):
algorithm = 'MD5'
if qop not in ('auth', 'auth-int'):
qop = None
if 'Authorization' not in request.headers or \
not check_digest_auth(user, passwd) or \
'Cookie' not in request.headers:
response = app.make_response('')
response.status_code = 401
        # RFC 2616 Section 4.2: HTTP headers are ASCII. That means
# request.remote_addr was originally ASCII, so I should be able to
# encode it back to ascii. Also, RFC2617 says about nonces: "The
# contents of the nonce are implementation dependent"
nonce = H(b''.join([
getattr(request,'remote_addr',u'').encode('ascii'),
b':',
str(time.time()).encode('ascii'),
b':',
os.urandom(10)
]), "MD5")
opaque = H(os.urandom(10), "MD5")
auth = WWWAuthenticate("digest")
auth.set_digest('[email protected]', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop, ), algorithm=algorithm)
response.headers['WWW-Authenticate'] = auth.to_header()
response.headers['Set-Cookie'] = 'fake=fake_value'
return response
return jsonify(authenticated=True, user=user)
@app.route('/delay/<delay>')
def delay_response(delay):
"""Returns a delayed response"""
delay = min(float(delay), 10)
time.sleep(delay)
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files'))
@app.route('/drip')
def drip():
"""Drips data over a duration after an optional initial delay."""
args = CaseInsensitiveDict(request.args.items())
duration = float(args.get('duration', 2))
numbytes = min(int(args.get('numbytes', 10)),(10 * 1024 * 1024)) # set 10MB limit
code = int(args.get('code', 200))
pause = duration / numbytes
delay = float(args.get('delay', 0))
if delay > 0:
time.sleep(delay)
def generate_bytes():
for i in xrange(numbytes):
yield u"*".encode('utf-8')
time.sleep(pause)
response = Response(generate_bytes(), headers={
"Content-Type": "application/octet-stream",
"Content-Length": str(numbytes),
})
response.status_code = code
return response
@app.route('/base64/<value>')
def decode_base64(value):
"""Decodes base64url-encoded string"""
encoded = value.encode('utf-8') # base64 expects binary string as input
return base64.urlsafe_b64decode(encoded).decode('utf-8')
@app.route('/cache', methods=('GET',))
def cache():
"""Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise."""
is_conditional = request.headers.get('If-Modified-Since') or request.headers.get('If-None-Match')
if is_conditional is None:
response = view_get()
response.headers['Last-Modified'] = http_date()
response.headers['ETag'] = uuid.uuid4().hex
return response
else:
return status_code(304)
@app.route('/cache/<int:value>')
def cache_control(value):
"""Sets a Cache-Control header."""
response = view_get()
response.headers['Cache-Control'] = 'public, max-age={0}'.format(value)
return response
@app.route('/encoding/utf8')
def encoding():
return render_template('UTF-8-demo.txt')
@app.route('/bytes/<int:n>')
def random_bytes(n):
"""Returns n random bytes generated with given seed."""
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
response = make_response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = 'application/octet-stream'
return response
@app.route('/stream-bytes/<int:n>')
def stream_random_bytes(n):
"""Streams n random bytes generated with given seed, at given chunk size per packet."""
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
def generate_bytes():
chunks = bytearray()
for i in xrange(n):
chunks.append(random.randint(0, 255))
if len(chunks) == chunk_size:
yield(bytes(chunks))
chunks = bytearray()
if chunks:
yield(bytes(chunks))
headers = {'Content-Type': 'application/octet-stream'}
return Response(generate_bytes(), headers=headers)
@app.route('/range/<int:numbytes>')
def range_request(numbytes):
"""Streams n random bytes generated with given seed, at given chunk size per packet."""
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(headers={
'ETag' : 'range%d' % numbytes,
'Accept-Ranges' : 'bytes'
})
response.status_code = 404
        response.data = 'number of bytes must be in the range (0, 102400]'
return response
params = CaseInsensitiveDict(request.args.items())
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
duration = float(params.get('duration', 0))
pause_per_byte = duration / numbytes
request_headers = get_headers()
first_byte_pos, last_byte_pos = get_request_range(request_headers, numbytes)
if first_byte_pos > last_byte_pos or first_byte_pos not in xrange(0, numbytes) or last_byte_pos not in xrange(0, numbytes):
response = Response(headers={
'ETag' : 'range%d' % numbytes,
'Accept-Ranges' : 'bytes',
'Content-Range' : 'bytes */%d' % numbytes
})
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord('a') + (i % 26))
if len(chunks) == chunk_size:
yield(bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield(bytes(chunks))
content_range = 'bytes %d-%d/%d' % (first_byte_pos, last_byte_pos, numbytes)
response_headers = {
'Content-Type': 'application/octet-stream',
'ETag' : 'range%d' % numbytes,
'Accept-Ranges' : 'bytes',
'Content-Range' : content_range }
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response
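# Illustrative check (not part of httpbin, name is hypothetical): the stream is
# deterministic, cycling 'a'..'z', so a partial request such as `Range: bytes=10-19`
# against /range/100 yields b'klmnopqrst' with a 206 status and
# `Content-Range: bytes 10-19/100` (assuming get_request_range parses standard
# Range headers, as the view above expects).
def _demo_range_request():
    client = app.test_client()
    resp = client.get('/range/100', headers={'Range': 'bytes=10-19'})
    assert resp.status_code == 206
    assert resp.data == b'klmnopqrst'
    assert resp.headers['Content-Range'] == 'bytes 10-19/100'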
@app.route('/links/<int:n>/<int:offset>')
def link_page(n, offset):
"""Generate a page containing n links to other pages which do the same."""
n = min(max(1, n), 200) # limit to between 1 and 200 links
link = "<a href='{0}'>{1}</a> "
html = ['<html><head><title>Links</title></head><body>']
for i in xrange(n):
if i == offset:
html.append("{0} ".format(i))
else:
html.append(link.format(url_for('link_page', n=n, offset=i), i))
html.append('</body></html>')
return ''.join(html)
@app.route('/links/<int:n>')
def links(n):
"""Redirect to first links page."""
return redirect(url_for('link_page', n=n, offset=0))
@app.route('/image')
def image():
"""Returns a simple image of the type suggest by the Accept header."""
headers = get_headers()
if 'accept' not in headers:
return image_png() # Default media type to png
accept = headers['accept'].lower()
if 'image/webp' in accept:
return image_webp()
elif 'image/svg+xml' in accept:
return image_svg()
elif 'image/jpeg' in accept:
return image_jpeg()
elif 'image/png' in accept or 'image/*' in accept:
return image_png()
else:
        return status_code(406)  # Not Acceptable: no supported image type in Accept
@app.route('/image/png')
def image_png():
data = resource('images/pig_icon.png')
return Response(data, headers={'Content-Type': 'image/png'})
@app.route('/image/jpeg')
def image_jpeg():
data = resource('images/jackal.jpg')
return Response(data, headers={'Content-Type': 'image/jpeg'})
@app.route('/image/webp')
def image_webp():
data = resource('images/wolf_1.webp')
return Response(data, headers={'Content-Type': 'image/webp'})
@app.route('/image/svg')
def image_svg():
data = resource('images/svg_logo.svg')
return Response(data, headers={'Content-Type': 'image/svg+xml'})
def resource(filename):
path = os.path.join(
tmpl_dir,
filename)
return open(path, 'rb').read()
@app.route("/xml")
def xml():
response = make_response(render_template("sample.xml"))
response.headers["Content-Type"] = "application/xml"
return response
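# Illustrative smoke test (not part of httpbin, name is hypothetical): the routes above
# can be exercised in-process with Flask's test client; the endpoints and response
# shapes come from the view functions in this module and the helpers they rely on.
def _demo_smoke_test():
    client = app.test_client()
    # /get echoes query arguments back under 'args'
    assert json.loads(client.get('/get?foo=bar').data.decode('utf-8'))['args'] == {'foo': 'bar'}
    # /ip reports the caller's address under 'origin'
    assert 'origin' in json.loads(client.get('/ip').data.decode('utf-8'))
    # /cache answers 304 to conditional requests
    assert client.get('/cache', headers={'If-None-Match': 'abc'}).status_code == 304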
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=5000)
parser.add_argument("--host", default="127.0.0.1")
args = parser.parse_args()
app.run(port=args.port, host=args.host)
|
|
'''
The MIT License (MIT)
Copyright (c) 2007 ~ 2015, Hong-She Liang <[email protected]>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
'''
Bootstrap rabird setup environment
@date 2015-08-20
@author Hong-She Liang <[email protected]>
'''
import os
import os.path
import platform
import subprocess
import sys
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
cmd = [
'powershell',
'-Command',
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
src = dst = None
try:
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(target, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = [
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
]
for dl in downloaders:
if dl.viable():
return dl
def download(url):
downloader = get_best_downloader()
downloader(url, os.path.basename(url))
def use_pip():
try:
import pip
except:
import os
import sys
# If we do not have pip, we fetch and install one. It will also install
# setuptools and wheel.
url = "https://bootstrap.pypa.io/get-pip.py"
filename = os.path.basename(url)
download(url)
os.system("%s %s" % (sys.executable, filename))
def use_rabird():
try:
import rabird.core
except:
use_pip()
import pip
pip.main(["install", "rabird.core"])
module_dirs = "rabird/core/__init__.py"
for apath in sys.path:
module_path = os.path.join(apath, module_dirs)
if os.path.exists(module_path) and os.path.isfile(module_path):
                # Generate an empty __init__.py inside rabird/ -- an ugly fix for the
                # "can't find rabird.core module" error during installation.
                #
                # The namespace directory ships without an __init__.py, and the package
                # cannot be imported in the same process immediately after pip installs
                # it, so we drop an empty __init__.py into the rabird/ namespace
                # directory to satisfy the import.
afile = open(os.path.join(os.path.dirname(
os.path.dirname(module_path)), "__init__.py"), "wb")
afile.close()
break
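# Illustrative usage (not part of this bootstrap script): pick the first viable
# downloader and fetch get-pip.py into the current directory. Which backend is chosen
# depends on what the machine provides (PowerShell, curl, wget, or the insecure
# urllib fallback), in the order listed in get_best_downloader().
#
#   downloader = get_best_downloader()
#   downloader('https://bootstrap.pypa.io/get-pip.py', 'get-pip.py')
#   # or simply:
#   download('https://bootstrap.pypa.io/get-pip.py')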
|
|
#!/usr/bin/env python
#
# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script uses the following Unicode tables:
# - DerivedCoreProperties.txt
# - DerivedNormalizationProps.txt
# - EastAsianWidth.txt
# - auxiliary/GraphemeBreakProperty.txt
# - PropList.txt
# - ReadMe.txt
# - Scripts.txt
# - UnicodeData.txt
#
# Since this should not require frequent updates, we just store this
# out-of-line and check the unicode.rs file into git.
import fileinput, re, os, sys, operator
preamble = '''// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly
#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''
# Mapping taken from Table 12 from:
# http://www.unicode.org/reports/tr44/#General_Category_Values
expanded_categories = {
'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
'Lm': ['L'], 'Lo': ['L'],
'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
    'Nd': ['N'], 'Nl': ['N'], 'No': ['N'],
'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
}
# these are the surrogate codepoints, which are not valid rust characters
surrogate_codepoints = (0xd800, 0xdfff)
def fetch(f):
if not os.path.exists(os.path.basename(f)):
os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
% f)
if not os.path.exists(os.path.basename(f)):
sys.stderr.write("cannot load %s" % f)
exit(1)
def is_surrogate(n):
return surrogate_codepoints[0] <= n <= surrogate_codepoints[1]
def load_unicode_data(f):
fetch(f)
gencats = {}
upperlower = {}
lowerupper = {}
combines = {}
canon_decomp = {}
compat_decomp = {}
udict = {};
range_start = -1;
for line in fileinput.input(f):
data = line.split(';');
if len(data) != 15:
continue
cp = int(data[0], 16);
if is_surrogate(cp):
continue
if range_start >= 0:
for i in xrange(range_start, cp):
udict[i] = data;
range_start = -1;
if data[1].endswith(", First>"):
range_start = cp;
continue;
udict[cp] = data;
for code in udict:
[code_org, name, gencat, combine, bidi,
decomp, deci, digit, num, mirror,
old, iso, upcase, lowcase, titlecase ] = udict[code];
# generate char to char direct common and simple conversions
# uppercase to lowercase
if gencat == "Lu" and lowcase != "" and code_org != lowcase:
upperlower[code] = int(lowcase, 16)
# lowercase to uppercase
if gencat == "Ll" and upcase != "" and code_org != upcase:
lowerupper[code] = int(upcase, 16)
# store decomposition, if given
if decomp != "":
if decomp.startswith('<'):
seq = []
for i in decomp.split()[1:]:
seq.append(int(i, 16))
compat_decomp[code] = seq
else:
seq = []
for i in decomp.split():
seq.append(int(i, 16))
canon_decomp[code] = seq
# place letter in categories as appropriate
for cat in [gencat, "Assigned"] + expanded_categories.get(gencat, []):
if cat not in gencats:
gencats[cat] = []
gencats[cat].append(code)
# record combining class, if any
if combine != "0":
if combine not in combines:
combines[combine] = []
combines[combine].append(code)
# generate Not_Assigned from Assigned
gencats["Cn"] = gen_unassigned(gencats["Assigned"])
# Assigned is not a real category
del(gencats["Assigned"])
# Other contains Not_Assigned
gencats["C"].extend(gencats["Cn"])
gencats = group_cats(gencats)
combines = to_combines(group_cats(combines))
return (canon_decomp, compat_decomp, gencats, combines, lowerupper, upperlower)
def group_cats(cats):
cats_out = {}
for cat in cats:
cats_out[cat] = group_cat(cats[cat])
return cats_out
def group_cat(cat):
cat_out = []
letters = sorted(set(cat))
cur_start = letters.pop(0)
cur_end = cur_start
for letter in letters:
assert letter > cur_end, \
"cur_end: %s, letter: %s" % (hex(cur_end), hex(letter))
if letter == cur_end + 1:
cur_end = letter
else:
cat_out.append((cur_start, cur_end))
cur_start = cur_end = letter
cat_out.append((cur_start, cur_end))
return cat_out
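# Illustrative example (not part of the generator): group_cat collapses a sorted,
# de-duplicated list of codepoints into inclusive (lo, hi) ranges, which is the form
# every emitted table uses. For instance:
#
#   >>> group_cat([0x41, 0x42, 0x43, 0x61, 0x62])
#   [(65, 67), (97, 98)]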
def ungroup_cat(cat):
cat_out = []
for (lo, hi) in cat:
while lo <= hi:
cat_out.append(lo)
lo += 1
return cat_out
def gen_unassigned(assigned):
assigned = set(assigned)
return ([i for i in range(0, 0xd800) if i not in assigned] +
[i for i in range(0xe000, 0x110000) if i not in assigned])
def to_combines(combs):
combs_out = []
for comb in combs:
for (lo, hi) in combs[comb]:
combs_out.append((lo, hi, comb))
combs_out.sort(key=lambda comb: comb[0])
return combs_out
def format_table_content(f, content, indent):
line = " "*indent
first = True
for chunk in content.split(","):
if len(line) + len(chunk) < 98:
if first:
line += chunk
else:
line += ", " + chunk
first = False
else:
f.write(line + ",\n")
line = " "*indent + chunk
f.write(line)
def load_properties(f, interestingprops):
fetch(f)
props = {}
re1 = re.compile("^ *([0-9A-F]+) *; *(\w+)")
re2 = re.compile("^ *([0-9A-F]+)\.\.([0-9A-F]+) *; *(\w+)")
for line in fileinput.input(os.path.basename(f)):
prop = None
d_lo = 0
d_hi = 0
m = re1.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(1)
prop = m.group(2)
else:
m = re2.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(2)
prop = m.group(3)
else:
continue
if interestingprops and prop not in interestingprops:
continue
d_lo = int(d_lo, 16)
d_hi = int(d_hi, 16)
if prop not in props:
props[prop] = []
props[prop].append((d_lo, d_hi))
# optimize if possible
for prop in props:
props[prop] = group_cat(ungroup_cat(props[prop]))
return props
# load all widths of want_widths, except those in except_cats
def load_east_asian_width(want_widths, except_cats):
f = "EastAsianWidth.txt"
fetch(f)
widths = {}
re1 = re.compile("^([0-9A-F]+);(\w+) +# (\w+)")
re2 = re.compile("^([0-9A-F]+)\.\.([0-9A-F]+);(\w+) +# (\w+)")
for line in fileinput.input(f):
width = None
d_lo = 0
d_hi = 0
cat = None
m = re1.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(1)
width = m.group(2)
cat = m.group(3)
else:
m = re2.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(2)
width = m.group(3)
cat = m.group(4)
else:
continue
if cat in except_cats or width not in want_widths:
continue
d_lo = int(d_lo, 16)
d_hi = int(d_hi, 16)
if width not in widths:
widths[width] = []
widths[width].append((d_lo, d_hi))
return widths
def escape_char(c):
return "'\\u{%x}'" % c
def emit_bsearch_range_table(f):
f.write("""
fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
r.binary_search_by(|&(lo,hi)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}).is_ok()
}\n
""")
def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True,
pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))):
pub_string = ""
if is_pub:
pub_string = "pub "
f.write(" %sconst %s: %s = &[\n" % (pub_string, name, t_type))
data = ""
first = True
for dat in t_data:
if not first:
data += ","
first = False
data += pfun(dat)
format_table_content(f, data, 8)
f.write("\n ];\n\n")
def emit_property_module(f, mod, tbl, emit):
f.write("pub mod %s {\n" % mod)
for cat in sorted(emit):
emit_table(f, "%s_table" % cat, tbl[cat])
f.write(" pub fn %s(c: char) -> bool {\n" % cat)
f.write(" super::bsearch_range_table(c, %s_table)\n" % cat)
f.write(" }\n\n")
f.write("}\n\n")
def emit_conversions_module(f, lowerupper, upperlower):
f.write("pub mod conversions {")
f.write("""
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
use core::option::Option;
use core::option::Option::{Some, None};
use core::result::Result::{Ok, Err};
pub fn to_lower(c: char) -> char {
match bsearch_case_table(c, LuLl_table) {
None => c,
Some(index) => LuLl_table[index].1
}
}
pub fn to_upper(c: char) -> char {
match bsearch_case_table(c, LlLu_table) {
None => c,
Some(index) => LlLu_table[index].1
}
}
fn bsearch_case_table(c: char, table: &'static [(char, char)]) -> Option<usize> {
match table.binary_search_by(|&(key, _)| {
if c == key { Equal }
else if key < c { Less }
else { Greater }
}) {
Ok(i) => Some(i),
Err(_) => None,
}
}
""")
emit_table(f, "LuLl_table",
sorted(upperlower.iteritems(), key=operator.itemgetter(0)), is_pub=False)
emit_table(f, "LlLu_table",
sorted(lowerupper.iteritems(), key=operator.itemgetter(0)), is_pub=False)
f.write("}\n\n")
def emit_grapheme_module(f, grapheme_table, grapheme_cats):
f.write("""pub mod grapheme {
use core::slice::SliceExt;
pub use self::GraphemeCat::*;
use core::result::Result::{Ok, Err};
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
pub enum GraphemeCat {
""")
for cat in grapheme_cats + ["Any"]:
f.write(" GC_" + cat + ",\n")
f.write(""" }
fn bsearch_range_value_table(c: char, r: &'static [(char, char, GraphemeCat)]) -> GraphemeCat {
use core::cmp::Ordering::{Equal, Less, Greater};
match r.binary_search_by(|&(lo, hi, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
Ok(idx) => {
let (_, _, cat) = r[idx];
cat
}
Err(_) => GC_Any
}
}
pub fn grapheme_category(c: char) -> GraphemeCat {
bsearch_range_value_table(c, grapheme_cat_table)
}
""")
emit_table(f, "grapheme_cat_table", grapheme_table, "&'static [(char, char, GraphemeCat)]",
pfun=lambda x: "(%s,%s,GC_%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]),
is_pub=False)
f.write("}\n")
def emit_charwidth_module(f, width_table):
f.write("pub mod charwidth {\n")
f.write(" use core::option::Option;\n")
f.write(" use core::option::Option::{Some, None};\n")
f.write(" use core::slice::SliceExt;\n")
f.write(" use core::result::Result::{Ok, Err};\n")
f.write("""
fn bsearch_range_value_table(c: char, is_cjk: bool, r: &'static [(char, char, u8, u8)]) -> u8 {
use core::cmp::Ordering::{Equal, Less, Greater};
match r.binary_search_by(|&(lo, hi, _, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
Ok(idx) => {
let (_, _, r_ncjk, r_cjk) = r[idx];
if is_cjk { r_cjk } else { r_ncjk }
}
Err(_) => 1
}
}
""")
f.write("""
pub fn width(c: char, is_cjk: bool) -> Option<usize> {
match c as usize {
_c @ 0 => Some(0), // null is zero width
cu if cu < 0x20 => None, // control sequences have no width
cu if cu < 0x7F => Some(1), // ASCII
cu if cu < 0xA0 => None, // more control sequences
_ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table) as usize)
}
}
""")
f.write(" // character width table. Based on Markus Kuhn's free wcwidth() implementation,\n")
f.write(" // http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c\n")
emit_table(f, "charwidth_table", width_table, "&'static [(char, char, u8, u8)]", is_pub=False,
pfun=lambda x: "(%s,%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2], x[3]))
f.write("}\n\n")
def emit_norm_module(f, canon, compat, combine, norm_props):
canon_keys = canon.keys()
canon_keys.sort()
compat_keys = compat.keys()
compat_keys.sort()
canon_comp = {}
comp_exclusions = norm_props["Full_Composition_Exclusion"]
for char in canon_keys:
if True in map(lambda (lo, hi): lo <= char <= hi, comp_exclusions):
continue
decomp = canon[char]
if len(decomp) == 2:
if not canon_comp.has_key(decomp[0]):
canon_comp[decomp[0]] = []
canon_comp[decomp[0]].append( (decomp[1], char) )
canon_comp_keys = canon_comp.keys()
canon_comp_keys.sort()
f.write("pub mod normalization {\n")
def mkdata_fun(table):
def f(char):
data = "(%s,&[" % escape_char(char)
first = True
for d in table[char]:
if not first:
data += ","
first = False
data += escape_char(d)
data += "])"
return data
return f
f.write(" // Canonical decompositions\n")
emit_table(f, "canonical_table", canon_keys, "&'static [(char, &'static [char])]",
pfun=mkdata_fun(canon))
f.write(" // Compatibility decompositions\n")
emit_table(f, "compatibility_table", compat_keys, "&'static [(char, &'static [char])]",
pfun=mkdata_fun(compat))
def comp_pfun(char):
data = "(%s,&[" % escape_char(char)
canon_comp[char].sort(lambda x, y: x[0] - y[0])
first = True
for pair in canon_comp[char]:
if not first:
data += ","
first = False
data += "(%s,%s)" % (escape_char(pair[0]), escape_char(pair[1]))
data += "])"
return data
f.write(" // Canonical compositions\n")
emit_table(f, "composition_table", canon_comp_keys,
"&'static [(char, &'static [(char, char)])]", pfun=comp_pfun)
f.write("""
fn bsearch_range_value_table(c: char, r: &'static [(char, char, u8)]) -> u8 {
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
use core::result::Result::{Ok, Err};
match r.binary_search_by(|&(lo, hi, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
Ok(idx) => {
let (_, _, result) = r[idx];
result
}
Err(_) => 0
}
}\n
""")
emit_table(f, "combining_class_table", combine, "&'static [(char, char, u8)]", is_pub=False,
pfun=lambda x: "(%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]))
f.write(""" #[deprecated(reason = "use the crates.io `unicode-normalization` lib instead",
since = "1.0.0")]
#[unstable(feature = "unicode",
reason = "this functionality will be moved to crates.io")]
pub fn canonical_combining_class(c: char) -> u8 {
bsearch_range_value_table(c, combining_class_table)
}
}
""")
def remove_from_wtable(wtable, val):
wtable_out = []
while wtable:
if wtable[0][1] < val:
wtable_out.append(wtable.pop(0))
elif wtable[0][0] > val:
break
else:
(wt_lo, wt_hi, width, width_cjk) = wtable.pop(0)
if wt_lo == wt_hi == val:
continue
elif wt_lo == val:
wtable_out.append((wt_lo+1, wt_hi, width, width_cjk))
elif wt_hi == val:
wtable_out.append((wt_lo, wt_hi-1, width, width_cjk))
else:
wtable_out.append((wt_lo, val-1, width, width_cjk))
wtable_out.append((val+1, wt_hi, width, width_cjk))
if wtable:
wtable_out.extend(wtable)
return wtable_out
def optimize_width_table(wtable):
wtable_out = []
w_this = wtable.pop(0)
while wtable:
        if w_this[1] == wtable[0][0] - 1 and w_this[2:] == wtable[0][2:]:  # contiguous ranges with identical widths
w_tmp = wtable.pop(0)
w_this = (w_this[0], w_tmp[1], w_tmp[2], w_tmp[3])
else:
wtable_out.append(w_this)
w_this = wtable.pop(0)
wtable_out.append(w_this)
return wtable_out
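# Illustrative examples (not part of the generator) of the two width-table passes
# above; entries are (lo, hi, width, width_cjk):
#
#   >>> remove_from_wtable([(10, 20, 1, 2)], 15)
#   [(10, 14, 1, 2), (16, 20, 1, 2)]
#   >>> optimize_width_table([(1, 2, 1, 1), (3, 5, 1, 1), (7, 7, 2, 2)])
#   [(1, 5, 1, 1), (7, 7, 2, 2)]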
if __name__ == "__main__":
r = "tables.rs"
if os.path.exists(r):
os.remove(r)
with open(r, "w") as rf:
# write the file's preamble
rf.write(preamble)
# download and parse all the data
fetch("ReadMe.txt")
with open("ReadMe.txt") as readme:
pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
unicode_version = re.search(pattern, readme.read()).groups()
rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the unicode parts of `CharExt` and `UnicodeStrPrelude` traits are based on.
pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);
""" % unicode_version)
(canon_decomp, compat_decomp, gencats, combines,
lowerupper, upperlower) = load_unicode_data("UnicodeData.txt")
want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase"]
derived = load_properties("DerivedCoreProperties.txt", want_derived)
scripts = load_properties("Scripts.txt", [])
props = load_properties("PropList.txt",
["White_Space", "Join_Control", "Noncharacter_Code_Point"])
norm_props = load_properties("DerivedNormalizationProps.txt",
["Full_Composition_Exclusion"])
# bsearch_range_table is used in all the property modules below
emit_bsearch_range_table(rf)
# category tables
for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \
("derived_property", derived, want_derived), \
("property", props, ["White_Space"]):
emit_property_module(rf, name, cat, pfuns)
# normalizations and conversions module
emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props)
emit_conversions_module(rf, lowerupper, upperlower)
### character width module
width_table = []
for zwcat in ["Me", "Mn", "Cf"]:
width_table.extend(map(lambda (lo, hi): (lo, hi, 0, 0), gencats[zwcat]))
width_table.append((4448, 4607, 0, 0))
# get widths, except those that are explicitly marked zero-width above
ea_widths = load_east_asian_width(["W", "F", "A"], ["Me", "Mn", "Cf"])
# these are doublewidth
for dwcat in ["W", "F"]:
width_table.extend(map(lambda (lo, hi): (lo, hi, 2, 2), ea_widths[dwcat]))
width_table.extend(map(lambda (lo, hi): (lo, hi, 1, 2), ea_widths["A"]))
width_table.sort(key=lambda w: w[0])
# soft hyphen is not zero width in preformatted text; it's used to indicate
# a hyphen inserted to facilitate a linebreak.
width_table = remove_from_wtable(width_table, 173)
# optimize the width table by collapsing adjacent entities when possible
width_table = optimize_width_table(width_table)
emit_charwidth_module(rf, width_table)
### grapheme cluster module
# from http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Break_Property_Values
grapheme_cats = load_properties("auxiliary/GraphemeBreakProperty.txt", [])
# Control
# Note 1:
# This category also includes Cs (surrogate codepoints), but Rust's `char`s are
# Unicode Scalar Values only, and surrogates are thus invalid `char`s.
# Thus, we have to remove Cs from the Control category
# Note 2:
# 0x0a and 0x0d (CR and LF) are not in the Control category for Graphemes.
# However, the Graphemes iterator treats these as a special case, so they
# should be included in grapheme_cats["Control"] for our implementation.
grapheme_cats["Control"] = group_cat(list(
(set(ungroup_cat(grapheme_cats["Control"]))
| set(ungroup_cat(grapheme_cats["CR"]))
| set(ungroup_cat(grapheme_cats["LF"])))
- set(ungroup_cat([surrogate_codepoints]))))
del(grapheme_cats["CR"])
del(grapheme_cats["LF"])
grapheme_table = []
for cat in grapheme_cats:
grapheme_table.extend([(x, y, cat) for (x, y) in grapheme_cats[cat]])
grapheme_table.sort(key=lambda w: w[0])
emit_grapheme_module(rf, grapheme_table, grapheme_cats.keys())
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
import pandas.util.testing as pdt
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.compat import guid
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client(driver='libhdfs'):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
return pa.hdfs.connect(host, port, user, driver=driver)
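# Illustrative usage (not part of the test suite): hdfs_test_client() is driven purely
# by environment variables, so it can be pointed at a real cluster like this
# (host/port values are placeholders):
#
#   os.environ['ARROW_HDFS_TEST_HOST'] = 'namenode'
#   os.environ['ARROW_HDFS_TEST_PORT'] = '8020'
#   fs = hdfs_test_client()              # default 'libhdfs' (JNI) driver
#   fs.ls('/tmp')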
@pytest.mark.hdfs
class HdfsTestCases(object):
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client(cls.DRIVER)
cls.tmp_path = '/tmp/pyarrow-test-{0}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_unknown_driver(self):
with pytest.raises(ValueError):
hdfs_test_client(driver="not_a_driver_name")
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
assert h2.driver == self.hdfs.driver
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
        with self.hdfs.open(f1_path, 'wb') as f:
            f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
pdt.assert_frame_equal(result.to_pandas()
.sort_values(by='index').reset_index(drop=True),
expected.to_pandas())
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
result = pq.read_table(path)
pdt.assert_frame_equal(result.to_pandas()
.sort_values(by='index').reset_index(drop=True),
expected.to_pandas())
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(path, filesystem=self.hdfs).to_pandas()
pdt.assert_frame_equal(result, df)
@pytest.mark.parquet
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
pytest.skip('No libhdfs available on system')
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
class TestLibHdfs3(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs3'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs3():
pytest.skip('No libhdfs3 available on system')
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
@pytest.mark.parquet
@pytest.mark.fastparquet
@pytest.mark.parametrize('client', ['libhdfs', 'libhdfs3'])
def test_fastparquet_read_with_hdfs(client):
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client(client)
df = pdt.makeDataFrame()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
pdt.assert_frame_equal(result, df)
|
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
The algorithm defines `advantage` and `strategy` networks that compute
advantages used to do regret matching across information sets and to
approximate the strategy profile of the game. To train these networks, a
reservoir buffer (other data structures may be used) accumulates the samples
gathered during game-tree traversals.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import simple_nets
import pyspiel
# Temporarily Disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
AdvantageMemory = collections.namedtuple(
"AdvantageMemory", "info_state iteration advantage action")
StrategyMemory = collections.namedtuple(
"StrategyMemory", "info_state iteration strategy_action_probs")
# TODO(author3) Refactor into data structures lib.
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
      ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError("{} elements could not be sampled from size {}".format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
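def _reservoir_buffer_demo(capacity=2, stream_length=10):
  """Hedged, illustrative helper (not used by the solver).
  Shows the property the solver relies on: a `ReservoirBuffer` sees an
  arbitrarily long stream but keeps at most `capacity` elements, each retained
  with probability capacity / stream_length, so `sample` is (approximately)
  uniform over the whole stream. All names and values here are illustrative.
  """
  buf = ReservoirBuffer(capacity)
  for element in range(stream_length):
    buf.add(element)
  assert len(buf) == capacity
  return buf.sample(capacity)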
class DeepCFRSolver(policy.Policy):
"""Implements a solver for the Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
  Note: batch sizes default to `None`, which means that each training step uses
  the full contents of the corresponding memory. To train on sampled
  mini-batches instead, set these values to something smaller than the memory
  capacity.
"""
def __init__(self,
session,
game,
policy_network_layers=(256, 256),
advantage_network_layers=(128, 128),
num_iterations: int = 100,
num_traversals: int = 20,
learning_rate: float = 1e-4,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity: int = int(1e6),
policy_network_train_steps: int = 1,
advantage_network_train_steps: int = 1,
reinitialize_advantage_networks: bool = True):
"""Initialize the Deep CFR algorithm.
Args:
session: (tf.Session) TensorFlow session.
game: Open Spiel game.
policy_network_layers: (list[int]) Layer sizes of strategy net MLP.
advantage_network_layers: (list[int]) Layer sizes of advantage net MLP.
num_iterations: Number of iterations.
num_traversals: Number of traversals per iteration.
learning_rate: Learning rate.
batch_size_advantage: (int or None) Batch size to sample from advantage
memories.
batch_size_strategy: (int or None) Batch size to sample from strategy
memories.
memory_capacity: Number of samples that can be stored in memory.
policy_network_train_steps: Number of policy network training steps (per
iteration).
advantage_network_train_steps: Number of advantage network training steps
(per iteration).
reinitialize_advantage_networks: Whether to re-initialize the
advantage network before training on each iteration.
"""
all_players = list(range(game.num_players()))
super(DeepCFRSolver, self).__init__(game, all_players)
self._game = game
if game.get_type().dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
      # `_traverse_game_tree` does not support simultaneous-move games.
      raise ValueError("Simultaneous games are not supported.")
self._session = session
self._batch_size_advantage = batch_size_advantage
self._batch_size_strategy = batch_size_strategy
self._policy_network_train_steps = policy_network_train_steps
self._advantage_network_train_steps = advantage_network_train_steps
self._num_players = game.num_players()
self._root_node = self._game.new_initial_state()
# TODO(author6) Allow embedding size (and network) to be specified.
self._embedding_size = len(self._root_node.information_state_tensor(0))
self._num_iterations = num_iterations
self._num_traversals = num_traversals
self._reinitialize_advantage_networks = reinitialize_advantage_networks
self._num_actions = game.num_distinct_actions()
self._iteration = 1
self._environment_steps = 0
    # Create the TensorFlow placeholders needed for the network updates.
self._info_state_ph = tf.placeholder(
shape=[None, self._embedding_size],
dtype=tf.float32,
name="info_state_ph")
self._info_state_action_ph = tf.placeholder(
shape=[None, self._embedding_size + 1],
dtype=tf.float32,
name="info_state_action_ph")
self._action_probs_ph = tf.placeholder(
shape=[None, self._num_actions],
dtype=tf.float32,
name="action_probs_ph")
self._iter_ph = tf.placeholder(
shape=[None, 1], dtype=tf.float32, name="iter_ph")
self._advantage_ph = []
for p in range(self._num_players):
self._advantage_ph.append(
tf.placeholder(
shape=[None, self._num_actions],
dtype=tf.float32,
name="advantage_ph_" + str(p)))
# Define strategy network, loss & memory.
self._strategy_memories = ReservoirBuffer(memory_capacity)
self._policy_network = simple_nets.MLP(self._embedding_size,
list(policy_network_layers),
self._num_actions)
action_logits = self._policy_network(self._info_state_ph)
# Illegal actions are handled in the traversal code where expected payoff
# and sampled regret is computed from the advantage networks.
self._action_probs = tf.nn.softmax(action_logits)
self._loss_policy = tf.reduce_mean(
tf.losses.mean_squared_error(
labels=tf.math.sqrt(self._iter_ph) * self._action_probs_ph,
predictions=tf.math.sqrt(self._iter_ph) * self._action_probs))
self._optimizer_policy = tf.train.AdamOptimizer(learning_rate=learning_rate)
self._learn_step_policy = self._optimizer_policy.minimize(self._loss_policy)
# Define advantage network, loss & memory. (One per player)
self._advantage_memories = [
ReservoirBuffer(memory_capacity) for _ in range(self._num_players)
]
self._advantage_networks = [
simple_nets.MLP(self._embedding_size, list(advantage_network_layers),
self._num_actions) for _ in range(self._num_players)
]
self._advantage_outputs = [
self._advantage_networks[i](self._info_state_ph)
for i in range(self._num_players)
]
self._loss_advantages = []
self._optimizer_advantages = []
self._learn_step_advantages = []
for p in range(self._num_players):
self._loss_advantages.append(
tf.reduce_mean(
tf.losses.mean_squared_error(
labels=tf.math.sqrt(self._iter_ph) * self._advantage_ph[p],
predictions=tf.math.sqrt(self._iter_ph) *
self._advantage_outputs[p])))
self._optimizer_advantages.append(
tf.train.AdamOptimizer(learning_rate=learning_rate))
self._learn_step_advantages.append(self._optimizer_advantages[p].minimize(
self._loss_advantages[p]))
@property
def advantage_buffers(self):
return self._advantage_memories
@property
def strategy_buffer(self):
return self._strategy_memories
def clear_advantage_buffers(self):
for p in range(self._num_players):
self._advantage_memories[p].clear()
def reinitialize_advantage_networks(self):
for p in range(self._num_players):
self.reinitialize_advantage_network(p)
def reinitialize_advantage_network(self, player):
self._session.run(
tf.group(*[
var.initializer
for var in self._advantage_networks[player].variables
]))
def solve(self):
"""Solution logic for Deep CFR."""
advantage_losses = collections.defaultdict(list)
for _ in range(self._num_iterations):
for p in range(self._num_players):
for _ in range(self._num_traversals):
self._traverse_game_tree(self._root_node, p)
if self._reinitialize_advantage_networks:
# Re-initialize advantage network for player and train from scratch.
self.reinitialize_advantage_network(p)
advantage_losses[p].append(self._learn_advantage_network(p))
self._iteration += 1
# Train policy network.
policy_loss = self._learn_strategy_network()
return self._policy_network, advantage_losses, policy_loss
def get_environment_steps(self):
return self._environment_steps
def _traverse_game_tree(self, state, player):
"""Performs a traversal of the game tree.
Over a traversal the advantage and strategy memories are populated with
computed advantage values and matched regrets respectively.
Args:
state: Current OpenSpiel game state.
player: (int) Player index for this traversal.
Returns:
Recursively returns expected payoffs for each action.
"""
self._environment_steps += 1
expected_payoff = collections.defaultdict(float)
if state.is_terminal():
      # Terminal state: return this player's utility.
return state.returns()[player]
    elif state.is_chance_node():
      # If this is a chance node, sample an action according to the chance
      # outcome probabilities.
      outcomes, probs = zip(*state.chance_outcomes())
      action = np.random.choice(outcomes, p=probs)
      return self._traverse_game_tree(state.child(action), player)
elif state.current_player() == player:
sampled_regret = collections.defaultdict(float)
# Update the policy over the info set & actions via regret matching.
_, strategy = self._sample_action_from_advantage(state, player)
for action in state.legal_actions():
expected_payoff[action] = self._traverse_game_tree(
state.child(action), player)
cfv = 0
for a_ in state.legal_actions():
cfv += strategy[a_] * expected_payoff[a_]
for action in state.legal_actions():
sampled_regret[action] = expected_payoff[action]
sampled_regret[action] -= cfv
sampled_regret_arr = [0] * self._num_actions
for action in sampled_regret:
sampled_regret_arr[action] = sampled_regret[action]
self._advantage_memories[player].add(
AdvantageMemory(state.information_state_tensor(), self._iteration,
sampled_regret_arr, action))
return cfv
else:
other_player = state.current_player()
_, strategy = self._sample_action_from_advantage(state, other_player)
      # Recompute the distribution to correct for numerical errors.
probs = np.array(strategy)
probs /= probs.sum()
sampled_action = np.random.choice(range(self._num_actions), p=probs)
self._strategy_memories.add(
StrategyMemory(
state.information_state_tensor(other_player), self._iteration,
strategy))
return self._traverse_game_tree(state.child(sampled_action), player)
def _sample_action_from_advantage(self, state, player):
"""Returns an info state policy by applying regret-matching.
Args:
state: Current OpenSpiel game state.
player: (int) Player index over which to compute regrets.
Returns:
1. (list) Advantage values for info state actions indexed by action.
      2. (np.array) Matched regrets (action probabilities), indexed by action.
"""
info_state = state.information_state_tensor(player)
legal_actions = state.legal_actions(player)
advantages_full = self._session.run(
self._advantage_outputs[player],
feed_dict={self._info_state_ph: np.expand_dims(info_state, axis=0)})[0]
advantages = [max(0., advantage) for advantage in advantages_full]
cumulative_regret = np.sum([advantages[action] for action in legal_actions])
matched_regrets = np.array([0.] * self._num_actions)
if cumulative_regret > 0.:
for action in legal_actions:
matched_regrets[action] = advantages[action] / cumulative_regret
else:
matched_regrets[max(legal_actions, key=lambda a: advantages_full[a])] = 1
return advantages, matched_regrets
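  # Worked example of the regret matching above (hypothetical numbers, not
  # taken from any real game): if advantages_full = [2.0, -1.0, 6.0] and all
  # three actions are legal, the clipped advantages are [2.0, 0.0, 6.0],
  # cumulative_regret = 8.0 and matched_regrets = [0.25, 0.0, 0.75]. If every
  # clipped advantage were zero, all probability mass would instead go to the
  # action with the highest raw advantage.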
def action_probabilities(self, state):
"""Returns action probabilities dict for a single batch."""
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
info_state_vector = np.array(state.information_state_tensor())
if len(info_state_vector.shape) == 1:
info_state_vector = np.expand_dims(info_state_vector, axis=0)
probs = self._session.run(
self._action_probs, feed_dict={self._info_state_ph: info_state_vector})
return {action: probs[0][action] for action in legal_actions}
def _learn_advantage_network(self, player):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Args:
player: (int) player index.
Returns:
The average loss over the advantage network.
"""
for _ in range(self._advantage_network_train_steps):
if self._batch_size_advantage:
if self._batch_size_advantage > len(self._advantage_memories[player]):
## Skip if there aren't enough samples
return None
samples = self._advantage_memories[player].sample(
self._batch_size_advantage)
else:
samples = self._advantage_memories[player]
info_states = []
advantages = []
iterations = []
for s in samples:
info_states.append(s.info_state)
advantages.append(s.advantage)
iterations.append([s.iteration])
# Ensure some samples have been gathered.
if not info_states:
return None
loss_advantages, _ = self._session.run(
[self._loss_advantages[player], self._learn_step_advantages[player]],
feed_dict={
self._info_state_ph: np.array(info_states),
self._advantage_ph[player]: np.array(advantages),
self._iter_ph: np.array(iterations),
})
return loss_advantages
def _learn_strategy_network(self):
"""Compute the loss over the strategy network.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
for _ in range(self._policy_network_train_steps):
if self._batch_size_strategy:
if self._batch_size_strategy > len(self._strategy_memories):
## Skip if there aren't enough samples
return None
samples = self._strategy_memories.sample(self._batch_size_strategy)
else:
samples = self._strategy_memories
info_states = []
action_probs = []
iterations = []
for s in samples:
info_states.append(s.info_state)
action_probs.append(s.strategy_action_probs)
iterations.append([s.iteration])
loss_strategy, _ = self._session.run(
[self._loss_policy, self._learn_step_policy],
feed_dict={
self._info_state_ph: np.array(info_states),
self._action_probs_ph: np.array(np.squeeze(action_probs)),
self._iter_ph: np.array(iterations),
})
return loss_strategy
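def _deep_cfr_usage_sketch():
  """Hedged usage sketch, not part of the library API.
  Mirrors a typical training loop under the assumption that a small game such
  as Kuhn poker is available; the hyperparameters below are illustrative only,
  not recommended settings.
  """
  game = pyspiel.load_game("kuhn_poker")
  with tf.Session() as sess:
    solver = DeepCFRSolver(
        sess,
        game,
        policy_network_layers=(16,),
        advantage_network_layers=(16,),
        num_iterations=2,
        num_traversals=2,
        learning_rate=1e-3,
        memory_capacity=int(1e4))
    sess.run(tf.global_variables_initializer())
    _, advantage_losses, policy_loss = solver.solve()
    return advantage_losses, policy_loss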
|