# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import volume
from nova.volume import volume_types
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'volumes')
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
meta_dict = {}
for i in vol['volume_metadata']:
meta_dict[i['key']] = i['value']
d['metadata'] = meta_dict
else:
d['metadata'] = {}
return d
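# A sketch of the mapping above with a hypothetical, detached volume row
# (all field values are invented for illustration):
#
#   vol = {'id': 'vol-1', 'status': 'available', 'size': 1,
#          'availability_zone': 'nova', 'created_at': '2012-01-01T00:00:00',
#          'attach_status': 'detached', 'display_name': 'test',
#          'display_description': 'demo', 'volume_type_id': None,
#          'snapshot_id': None, 'volume_metadata': []}
#
# would translate to camelCase API keys ('availabilityZone', 'displayName',
# 'snapshotId', ...), with 'attachments' set to [{}] and 'metadata' set to {}.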
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availabilityZone')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeType')
elem.set('snapshotId')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
metadata = xmlutil.make_flat_dict('metadata')
elem.append(metadata)
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1)
class VolumeController(object):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
volumes = self.volume_api.get_all(context)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@wsgi.serializers(xml=VolumeTemplate)
def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
authorize(context)
if not body:
raise exc.HTTPUnprocessableEntity()
vol = body['volume']
size = vol['size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
vol_type = vol.get('volume_type', None)
if vol_type:
try:
vol_type = volume_types.get_volume_type_by_name(context,
vol_type)
except exception.NotFound:
raise exc.HTTPNotFound()
metadata = vol.get('metadata', None)
snapshot_id = vol.get('snapshot_id')
if snapshot_id is not None:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
else:
snapshot = None
availability_zone = vol.get('availability_zone', None)
new_volume = self.volume_api.create(context,
size,
vol.get('display_name'),
vol.get('display_description'),
snapshot=snapshot,
volume_type=vol_type,
metadata=metadata,
availability_zone=availability_zone
)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume))
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
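# A request body this create() method would accept, sketched with
# hypothetical values (only 'size' is required by the code above):
#
#   {"volume": {"size": 10,
#               "display_name": "my-volume",
#               "display_description": "scratch space",
#               "volume_type": "standard",        # optional, looked up by name
#               "metadata": {"purpose": "test"},  # optional
#               "snapshot_id": None,              # optional
#               "availability_zone": None}}       # optional
#
# The response wraps the translated volume dict under "volume" and sets a
# Location header of the form '<request url>/<new volume id>'.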
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(volume_id,
instance_uuid,
mountpoint)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
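# For example (hypothetical IDs), volume 'vol-1' attached to instance
# 'uuid-1' at '/dev/vdb' maps to:
#   {'id': 'vol-1', 'volumeId': 'vol-1', 'serverId': 'uuid-1',
#    'device': '/dev/vdb'}
# 'device' is omitted when no mountpoint is recorded.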
def make_attachment(elem):
elem.set('id')
elem.set('serverId')
elem.set('volumeId')
elem.set('device')
class VolumeAttachmentTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachment',
selector='volumeAttachment')
make_attachment(root)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachments')
elem = xmlutil.SubTemplateElement(root, 'volumeAttachment',
selector='volumeAttachments')
make_attachment(elem)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentController(object):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
def __init__(self):
self.compute_api = compute.API()
super(VolumeAttachmentController, self).__init__()
@wsgi.serializers(xml=VolumeAttachmentsTemplate)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def show(self, req, server_id, id):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
volume_id = id
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
assigned_mountpoint = None
for bdm in bdms:
if bdm['volume_id'] == volume_id:
assigned_mountpoint = bdm['device_name']
break
if assigned_mountpoint is None:
LOG.debug("volume_id not found")
raise exc.HTTPNotFound()
return {'volumeAttachment': _translate_attachment_detail_view(
volume_id,
instance['uuid'],
assigned_mountpoint)}
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
if not body:
raise exc.HTTPUnprocessableEntity()
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment']['device']
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
try:
instance = self.compute_api.get(context, server_id)
self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.NotFound:
raise exc.HTTPNotFound()
# The attach is async
attachment = {}
attachment['id'] = volume_id
attachment['volumeId'] = volume_id
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
# the attachment (until the attachment completes). We could also
# get problems with concurrent requests. I think we need an
# attachment state, and to write to the DB here, but that's a bigger
# change.
# For now, we'll probably have to rely on libraries being smart
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
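# A minimal request body accepted by this create() method, with hypothetical
# values:
#
#   {"volumeAttachment": {"volumeId": "vol-1", "device": "/dev/vdb"}}
#
# Because the attach itself is asynchronous (see the note above), the reply
# only echoes the volume id back as the attachment id.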
def update(self, req, server_id, id, body):
"""Update a volume attachment. We don't currently support this."""
raise exc.HTTPBadRequest()
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
volume_id = id
LOG.audit(_("Detach volume %s"), volume_id, context=context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
found = False
for bdm in bdms:
if bdm['volume_id'] == volume_id:
self.compute_api.detach_volume(context,
volume_id=volume_id)
found = True
break
if not found:
raise exc.HTTPNotFound()
else:
return webob.Response(status_int=202)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
limited_list = common.limited(bdms, req)
results = []
for bdm in limited_list:
if bdm['volume_id']:
results.append(entity_maker(bdm['volume_id'],
bdm['instance_uuid'],
bdm['device_name']))
return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, vol)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
def make_snapshot(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeId')
class SnapshotTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshot', selector='snapshot')
make_snapshot(root)
return xmlutil.MasterTemplate(root, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshots')
elem = xmlutil.SubTemplateElement(root, 'snapshot',
selector='snapshots')
make_snapshot(elem)
return xmlutil.MasterTemplate(root, 1)
class SnapshotController(object):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(SnapshotController, self).__init__()
@wsgi.serializers(xml=SnapshotTemplate)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
try:
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=SnapshotsTemplate)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@wsgi.serializers(xml=SnapshotsTemplate)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@wsgi.serializers(xml=SnapshotTemplate)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context)
if not body:
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
volume = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
LOG.audit(_("Create snapshot from volume %s"), volume_id,
context=context)
if force:
new_snapshot = self.volume_api.create_snapshot_force(context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
class Volumes(extensions.ExtensionDescriptor):
"""Volumes support"""
name = "Volumes"
alias = "os-volumes"
namespace = "http://docs.openstack.org/compute/ext/volumes/api/v1.1"
updated = "2011-03-25T00:00:00+00:00"
def get_resources(self):
resources = []
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
res = extensions.ResourceExtension('os-volumes_boot',
inherits='servers')
resources.append(res)
res = extensions.ResourceExtension('os-snapshots',
SnapshotController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
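# In terms of exposed operations (derived from the controllers above, not
# from an actual route table):
#   os-volumes:            index / detail / show / create / delete
#   os-volume_attachments  (child of servers): index / show / create / delete
#                          (update is rejected with 400)
#   os-volumes_boot:       inherits the 'servers' resource
#   os-snapshots:          index / detail / show / create / delete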
# -*- test-case-name: go.routers.app_multiplexer.tests.test_vumi_app -*-
import json
from twisted.internet.defer import inlineCallbacks
from vumi import log
from vumi.config import ConfigDict, ConfigList, ConfigInt, ConfigText
from vumi.components.session import SessionManager
from vumi.message import TransportUserMessage
from go.vumitools.app_worker import GoRouterWorker
from go.routers.app_multiplexer.common import mkmenu, clean
class ApplicationMultiplexerConfig(GoRouterWorker.CONFIG_CLASS):
# Static configuration
session_expiry = ConfigInt(
"Maximum amount of time in seconds to keep session data around",
default=300, static=True)
# Dynamic, per-message configuration
menu_title = ConfigDict(
"Content for the menu title",
default={'content': "Please select a choice."})
entries = ConfigList(
"A list of application endpoints and associated labels",
default=[])
invalid_input_message = ConfigText(
"Prompt to display when warning about an invalid choice",
default=("That is an incorrect choice. Please enter the number "
"of the menu item you wish to choose.\n\n 1) Try Again"))
error_message = ConfigText(
("Prompt to display when a configuration change invalidates "
"an active session."),
default=("Oops! We experienced a temporary error. "
"Please try and dial the line again."))
class ApplicationMultiplexer(GoRouterWorker):
"""
Router that multiplexes between different endpoints on the outbound path.
State Diagram (for fun):
+----------------+
| |
| start |
| |
+----+-----------+
|
|
+----*-----------+ +----------------+
| *----+ |
| select | | bad_input |
| +----* |
+----+----*------+ +----------------+
| |
| |
+----*----+------+
| |
| selected +
| |
+----------------+
"""
CONFIG_CLASS = ApplicationMultiplexerConfig
worker_name = 'application_multiplexer'
STATE_START = "start"
STATE_SELECT = "select"
STATE_SELECTED = "selected"
STATE_BAD_INPUT = "bad_input"
def setup_router(self):
self.handlers = {
self.STATE_START: self.handle_state_start,
self.STATE_SELECT: self.handle_state_select,
self.STATE_SELECTED: self.handle_state_selected,
self.STATE_BAD_INPUT: self.handle_state_bad_input,
}
return super(ApplicationMultiplexer, self).setup_router()
def session_manager(self, config):
key_prefix = ':'.join((self.worker_name, config.router.key))
redis = self.redis.sub_manager(key_prefix)
return SessionManager(redis, max_session_length=config.session_expiry)
def target_endpoints(self, config):
"""
Make sure the currently active endpoint is still valid.
"""
return set([entry['endpoint'] for entry in config.entries])
@inlineCallbacks
def handle_inbound(self, config, msg, conn_name):
"""
Main delegation point for handling inbound messages and
managing the state machine.
"""
log.msg("Processing inbound message: %s" % (msg,))
user_id = msg['from_addr']
session_manager = yield self.session_manager(config)
session = yield session_manager.load_session(user_id)
session_event = msg['session_event']
if not session or session_event == TransportUserMessage.SESSION_NEW:
log.msg("Creating session for user %s" % user_id)
session = {}
state = self.STATE_START
yield session_manager.create_session(user_id, state=state)
elif session_event == TransportUserMessage.SESSION_CLOSE:
yield self.handle_session_close(config, session, msg)
return
else:
log.msg("Loading session for user %s: %s" % (user_id, session,))
state = session['state']
try:
# We must assume the state handlers might be async, even if the
# current implementations aren't. There is at least one test that
# depends on asynchrony here to hook into the state transition.
state_resp = yield self.handlers[state](config, session, msg)
if state_resp.next_state is None:
# Session terminated (right now, just in the case of an
# administrator-initiated configuration change).
yield session_manager.clear_session(user_id)
else:
session['state'] = state_resp.next_state
session.update(state_resp.session_update)
if state != state_resp.next_state:
log.msg("State transition for user %s: %s => %s" %
(user_id, state, state_resp.next_state))
yield session_manager.save_session(user_id, session)
for msg, endpoint in state_resp.inbound:
yield self.publish_inbound(msg, endpoint)
for msg in state_resp.outbound:
yield self.publish_outbound(msg)
except:
log.err()
yield session_manager.clear_session(user_id)
yield self.publish_outbound(self.make_error_reply(msg, config))
def handle_state_start(self, config, session, msg):
"""
When presenting the menu, we also store the list of endpoints
in the session data. Later, in the SELECT state, we load
these endpoints and retrieve the candidate endpoint based
on the user's menu choice.
"""
reply_msg = msg.reply(self.create_menu(config))
endpoints = json.dumps(
[entry['endpoint'] for entry in config.entries]
)
return StateResponse(
self.STATE_SELECT, {'endpoints': endpoints}, outbound=[reply_msg])
def handle_state_select(self, config, session, msg):
endpoint = self.get_endpoint_for_choice(msg, session)
if endpoint is None:
reply_msg = msg.reply(config.invalid_input_message)
return StateResponse(self.STATE_BAD_INPUT, outbound=[reply_msg])
if endpoint not in self.target_endpoints(config):
log.msg(("Router configuration change forced session "
"termination for user %s" % msg['from_addr']))
error_reply_msg = self.make_error_reply(msg, config)
return StateResponse(None, outbound=[error_reply_msg])
forwarded_msg = self.forwarded_message(
msg, content=None,
session_event=TransportUserMessage.SESSION_NEW)
log.msg("Switched to endpoint '%s' for user %s" %
(endpoint, msg['from_addr']))
return StateResponse(
self.STATE_SELECTED, {'active_endpoint': endpoint},
inbound=[(forwarded_msg, endpoint)])
def handle_state_selected(self, config, session, msg):
active_endpoint = session['active_endpoint']
if active_endpoint not in self.target_endpoints(config):
log.msg(("Router configuration change forced session "
"termination for user %s" % msg['from_addr']))
error_reply_msg = self.make_error_reply(msg, config)
return StateResponse(None, outbound=[error_reply_msg])
else:
return StateResponse(
self.STATE_SELECTED, inbound=[(msg, active_endpoint)])
def handle_state_bad_input(self, config, session, msg):
choice = self.get_menu_choice(msg, (1, 1))
if choice is None:
reply_msg = msg.reply(config.invalid_input_message)
return StateResponse(self.STATE_BAD_INPUT, outbound=[reply_msg])
else:
return self.handle_state_start(config, session, msg)
@inlineCallbacks
def handle_outbound(self, config, msg, conn_name):
log.msg("Processing outbound message: %s" % (msg,))
user_id = msg['to_addr']
session_event = msg['session_event']
session_manager = yield self.session_manager(config)
session = yield session_manager.load_session(user_id)
if session and (session_event == TransportUserMessage.SESSION_CLOSE):
yield session_manager.clear_session(user_id)
yield self.publish_outbound(msg)
@inlineCallbacks
def handle_session_close(self, config, session, msg):
user_id = msg['from_addr']
if (session.get('state', None) == self.STATE_SELECTED and
session['active_endpoint'] in self.target_endpoints(config)):
yield self.publish_inbound(msg, session['active_endpoint'])
session_manager = yield self.session_manager(config)
yield session_manager.clear_session(user_id)
def publish_outbound(self, msg):
return super(ApplicationMultiplexer, self).publish_outbound(
msg, "default")
def make_error_reply(self, msg, config):
return msg.reply(config.error_message, continue_session=False)
def forwarded_message(self, msg, **kwargs):
copy = TransportUserMessage(**msg.payload)
for k, v in kwargs.items():
copy[k] = v
return copy
def get_endpoint_for_choice(self, msg, session):
"""
Retrieves the candidate endpoint based on the user's numeric choice
"""
endpoints = json.loads(session['endpoints'])
index = self.get_menu_choice(msg, (1, len(endpoints)))
if index is None:
return None
return endpoints[index - 1]
def get_menu_choice(self, msg, valid_range):
"""
Parse user input for selecting a numeric menu choice
"""
try:
value = int(clean(msg['content']))
except ValueError:
return None
else:
if value not in range(valid_range[0], valid_range[1] + 1):
return None
return value
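# For example, with valid_range=(1, 3): "2" parses to 2, while "abc" (not an
# integer) and "7" (outside the range) both return None.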
def create_menu(self, config):
labels = [entry['label'] for entry in config.entries]
return (config.menu_title['content'] + "\n" + mkmenu(labels))
class StateResponse(object):
def __init__(self, state, session_update=None, inbound=(), outbound=()):
self.next_state = state
self.session_update = session_update or {}
self.inbound = inbound
self.outbound = outbound
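# A sketch of the dynamic configuration this router consumes (endpoint and
# label values are invented for illustration):
#
#   {
#       "menu_title": {"content": "Please select a choice."},
#       "entries": [
#           {"label": "Weather", "endpoint": "weather_app"},
#           {"label": "Sports",  "endpoint": "sports_app"},
#       ],
#   }
#
# create_menu() joins menu_title['content'] with mkmenu() over the labels,
# and target_endpoints() reduces 'entries' to the set of endpoint names.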
"""
Custom `Screen` class for the `pyte` library.
Changes compared to the original `Screen` class:
- We store the layout in a prompt_toolkit.layout.screen.Screen instance.
This allows fast rendering in a prompt_toolkit user control.
- 256 colour and true color support.
- CPR support and device attributes.
"""
from __future__ import unicode_literals
from collections import defaultdict
from pyte import charsets as cs
from pyte import modes as mo
from pyte.screens import Margins
from six.moves import range
from prompt_toolkit.cache import FastDictCache
from prompt_toolkit.layout.screen import Screen, Char
from prompt_toolkit.styles import Attrs
from prompt_toolkit.output.vt100 import FG_ANSI_COLORS, BG_ANSI_COLORS
from prompt_toolkit.output.vt100 import _256_colors as _256_colors_table
from collections import namedtuple
__all__ = (
'BetterScreen',
)
class CursorPosition(object):
" Mutable CursorPosition. "
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
return 'pymux.CursorPosition(x=%r, y=%r)' % (self.x, self.y)
# Intern dictionary for interning unicode strings. This should save memory and
# make our cache faster.
_intern_dict = {}
def unicode_intern(text):
try:
return _intern_dict[text]
except KeyError:
_intern_dict[text] = text
return text
# Cache for Char objects.
_CHAR_CACHE = FastDictCache(Char, size=1000 * 1000)
# Custom Savepoint that also stores the Attrs.
_Savepoint = namedtuple("_Savepoint", [
'cursor_x',
'cursor_y',
'g0_charset',
'g1_charset',
'charset',
'origin',
'wrap',
'attrs',
'style_str',
])
class BetterScreen(object):
"""
Custom screen class. Most of the methods are called from a vt100 Pyte
stream.
The data buffer is stored in a :class:`prompt_toolkit.layout.screen.Screen`
class, because this way, we can send it to the renderer without any
transformation.
"""
swap_variables = [
'mode',
'margins',
'charset',
'g0_charset',
'g1_charset',
'tabstops',
'data_buffer',
'pt_cursor_position',
'max_y',
]
def __init__(self, lines, columns, write_process_input, bell_func=None,
get_history_limit=None):
assert isinstance(lines, int)
assert isinstance(columns, int)
assert callable(write_process_input)
assert bell_func is None or callable(bell_func)
assert get_history_limit is None or callable(get_history_limit)
bell_func = bell_func or (lambda: None)
get_history_limit = get_history_limit or (lambda: 2000)
self._history_cleanup_counter = 0
self.savepoints = []
self.lines = lines
self.columns = columns
self.write_process_input = write_process_input
self.bell_func = bell_func
self.get_history_limit = get_history_limit
self.reset()
@property
def in_application_mode(self):
"""
True when we are in application mode. This means that the process is
expecting some other key sequences as input. (Like for the arrows.)
"""
# Not in cursor mode.
return (1 << 5) in self.mode
@property
def mouse_support_enabled(self):
" True when mouse support has been enabled by the application. "
return (1000 << 5) in self.mode
@property
def urxvt_mouse_support_enabled(self):
return (1015 << 5) in self.mode
@property
def sgr_mouse_support_enabled(self):
" Xterm Sgr mouse support. "
return (1006 << 5) in self.mode
@property
def bracketed_paste_enabled(self):
return (2004 << 5) in self.mode
@property
def has_reverse_video(self):
" The whole screen is set to reverse video. "
return mo.DECSCNM in self.mode
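# The '<< 5' pattern above mirrors set_mode()/reset_mode(): private (DEC)
# mode numbers are stored shifted left by five bits so they cannot collide
# with non-private modes. For example, "\e[?2004h" (bracketed paste) arrives
# as set_mode(2004, private=True) and is stored as 2004 << 5 == 64128, which
# is what bracketed_paste_enabled checks for.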
def reset(self):
"""Resets the terminal to its initial state.
* Scroll margins are reset to screen boundaries.
* Cursor is moved to home location -- ``(0, 0)`` and its
attributes are set to defaults (see :attr:`default_char`).
* Screen is cleared -- each character is reset to
:attr:`default_char`.
* Tabstops are reset to "every eight columns".
.. note::
Neither VT220 nor VT102 manuals mentioned that terminal modes
and tabstops should be reset as well, thanks to
:manpage:`xterm` -- we now know that.
"""
self._reset_screen()
self.title = ''
self.icon_name = ''
# Reset modes.
self.mode = set([
mo.DECAWM, # Autowrap mode. (default: disabled).
mo.DECTCEM # Text cursor enable mode. (default enabled).
])
# According to VT220 manual and ``linux/drivers/tty/vt.c``
# the default G0 charset is latin-1, but for reasons unknown
# latin-1 breaks ascii-graphics; so G0 defaults to cp437.
# XXX: The comment above comes from the original Pyte implementation,
# it seems for us that LAT1_MAP should indeed be the default, if
# not a French version of Vim would incorrectly show some
# characters.
self.charset = 0
# self.g0_charset = cs.IBMPC_MAP
self.g0_charset = cs.LAT1_MAP
self.g1_charset = cs.VT100_MAP
# From ``man terminfo`` -- "... hardware tabs are initially
# set every `n` spaces when the terminal is powered up. Since
# we aim to support VT102 / VT220 and linux -- we use n = 8.
# (We choose to create tab stops until x=1000, because we keep the
# tab stops when the screen increases in size. The OS X 'ls' command
# relies on the stops to be there.)
self.tabstops = set(range(8, 1000, 8))
# The original Screen instance, when going to the alternate screen.
self._original_screen = None
def _reset_screen(self):
""" Reset the Screen content. (also called when switching from/to
alternate buffer. """
self.pt_screen = Screen(default_char=Char(' ', '')) # TODO: maybe stop using this Screen class.
self.pt_screen.cursor_position = CursorPosition(0, 0)
self.pt_screen.show_cursor = True
self.data_buffer = self.pt_screen.data_buffer
self.pt_cursor_position = self.pt_screen.cursor_position
self.wrapped_lines = [] # List of line indexes that were wrapped.
self._attrs = Attrs(
color=None, bgcolor=None, bold=False, underline=False,
italic=False, blink=False, reverse=False, hidden=False)
self._style_str = ''
self.margins = None
self.max_y = 0 # Max 'y' position to which is written.
def resize(self, lines=None, columns=None):
# Save the dimensions.
lines = lines if lines is not None else self.lines
columns = columns if columns is not None else self.columns
if self.lines != lines or self.columns != columns:
self.lines = lines
self.columns = columns
self._reset_offset_and_margins()
# If the height was reduced, and there are lines below
# `cursor_position_y+lines`. Remove them by setting 'max_y'.
# (If we don't do this. Clearing the screen, followed by reducing
# the height will keep the cursor at the top, hiding some content.)
self.max_y = min(
self.max_y,
self.pt_cursor_position.y + lines - 1)
self._reflow()
@property
def line_offset(self):
" Return the index of the first visible line. "
cpos_y = self.pt_cursor_position.y
# NOTE: the +1 is required because `max_y` starts counting at 0 for the
# first line, while `self.lines` is the number of lines, starting
# at 1 for one line. The offset refers to the index of the first
# visible line.
# For instance, if we have: max_y=14 and lines=15. Then all lines
# from 0..14 have been used. This means 15 lines are used, and
# the first index should be 0.
return max(0, min(cpos_y, self.max_y - self.lines + 1))
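# With scrollback the same formula clamps to the cursor: e.g. max_y=50,
# lines=15 gives max_y - lines + 1 == 36, and with the cursor at y=50 the
# first visible line is min(50, 36) == 36 (lines 36..50 are on screen).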
def set_margins(self, top=None, bottom=None):
"""Selects top and bottom margins for the scrolling region.
Margins determine which screen lines move during scrolling
(see :meth:`index` and :meth:`reverse_index`). Characters added
outside the scrolling region do not cause the screen to scroll.
:param int top: the smallest line number that is scrolled.
:param int bottom: the biggest line number that is scrolled.
"""
if top is None and bottom is None:
return
margins = self.margins or Margins(0, self.lines - 1)
top = margins.top if top is None else top - 1
bottom = margins.bottom if bottom is None else bottom - 1
# Arguments are 1-based, while :attr:`margins` are zero based --
# so we have to decrement them by one. We also make sure that
# both of them are bounded by [0, lines - 1].
top = max(0, min(top, self.lines - 1))
bottom = max(0, min(bottom, self.lines - 1))
# Even though VT102 and VT220 require DECSTBM to ignore regions
# of width less than 2, some programs (like aptitude for example)
# rely on it. Practicality beats purity.
if bottom - top >= 1:
self.margins = Margins(top, bottom)
# The cursor moves to the home position when the top and
# bottom margins of the scrolling region (DECSTBM) changes.
self.cursor_position()
def _reset_offset_and_margins(self):
"""
Recalculate offset and move cursor (make sure that the bottom is
visible.)
"""
self.margins = None
def set_charset(self, code, mode):
"""Set active ``G0`` or ``G1`` charset.
:param str code: character set code, should be a character
from ``"B0UK"`` -- otherwise ignored.
:param str mode: if ``"("`` ``G0`` charset is set, if
``")"`` -- we operate on ``G1``.
.. warning:: User-defined charsets are currently not supported.
"""
if code in cs.MAPS:
charset_map = cs.MAPS[code]
if mode == '(':
self.g0_charset = charset_map
elif mode == ')':
self.g1_charset = charset_map
def set_mode(self, *modes, **kwargs):
# Private mode codes are shifted, to be distinguished from non-private
# ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
self.mode.update(modes)
# When DECCOLM mode is set, the screen is erased and the cursor
# moves to the home position.
if mo.DECCOLM in modes:
self.resize(columns=132)
self.erase_in_display(2)
self.cursor_position()
# According to `vttest`, DECOM should also home the cursor, see
# vttest/main.c:303.
if mo.DECOM in modes:
self.cursor_position()
# Make the cursor visible.
if mo.DECTCEM in modes:
self.pt_screen.show_cursor = True
# On "\e[?1049h", enter alternate screen mode. Backup the current state,
if (1049 << 5) in modes:
self._original_screen = self.pt_screen
self._original_screen_vars = \
dict((v, getattr(self, v)) for v in self.swap_variables)
self._reset_screen()
self._reset_offset_and_margins()
def reset_mode(self, *modes, **kwargs):
"""Resets (disables) a given list of modes.
:param list modes: modes to reset -- hopefully, each mode is a
constant from :mod:`pyte.modes`.
"""
# Private mode codes are shifted, to be distinguished from non-private
# ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
self.mode.difference_update(modes)
# Lines below follow the logic in :meth:`set_mode`.
if mo.DECCOLM in modes:
self.resize(columns=80)
self.erase_in_display(2)
self.cursor_position()
if mo.DECOM in modes:
self.cursor_position()
# Hide the cursor.
if mo.DECTCEM in modes:
self.pt_screen.show_cursor = False
# On "\e[?1049l", restore from alternate screen mode.
if (1049 << 5) in modes and self._original_screen:
for k, v in self._original_screen_vars.items():
setattr(self, k, v)
self.pt_screen = self._original_screen
self._original_screen = None
self._original_screen_vars = {}
self._reset_offset_and_margins()
@property
def in_alternate_screen(self):
return bool(self._original_screen)
def shift_in(self):
" Activates ``G0`` character set. "
self.charset = 0
def shift_out(self):
" Activates ``G1`` character set. "
self.charset = 1
def draw(self, chars):
"""
Draw characters.
`chars` is supposed to *not* contain any special characters.
No newlines or control codes.
"""
# Aliases for variables that are used more than once in this function.
# Local lookups are always faster.
# (This draw function is called for every printable character that a
# process outputs; it should be as performant as possible.)
pt_screen = self.pt_screen
data_buffer = pt_screen.data_buffer
cursor_position = pt_screen.cursor_position
cursor_position_x = cursor_position.x
cursor_position_y = cursor_position.y
in_irm = mo.IRM in self.mode
char_cache = _CHAR_CACHE
columns = self.columns
# Translating a given character.
if self.charset:
chars = chars.translate(self.g1_charset)
else:
chars = chars.translate(self.g0_charset)
style = self._style_str
for char in chars:
# Create 'Char' instance.
pt_char = char_cache[char, style]
char_width = pt_char.width
# If this was the last column in a line and auto wrap mode is
# enabled, move the cursor to the beginning of the next line,
# otherwise replace characters already displayed with newly
# entered.
if cursor_position_x >= columns:
if mo.DECAWM in self.mode:
self.carriage_return()
self.linefeed()
cursor_position = pt_screen.cursor_position
cursor_position_x = cursor_position.x
cursor_position_y = cursor_position.y
self.wrapped_lines.append(cursor_position_y)
else:
cursor_position_x -= max(0, char_width)
# If Insert mode is set, new characters move old characters to
# the right, otherwise terminal is in Replace mode and new
# characters replace old characters at cursor position.
if in_irm:
self.insert_characters(max(0, char_width))
row = data_buffer[cursor_position_y]
if char_width == 1:
row[cursor_position_x] = pt_char
elif char_width > 1: # 2
# Double width character. Put an empty string in the second
# cell, because this is different from every character and
# causes the render engine to clear this character, when
# overwritten.
row[cursor_position_x] = pt_char
row[cursor_position_x + 1] = char_cache['', style]
elif char_width == 0:
# This is probably a part of a decomposed unicode character.
# Merge into the previous cell.
# See: https://en.wikipedia.org/wiki/Unicode_equivalence
prev_char = row[cursor_position_x - 1]
row[cursor_position_x - 1] = char_cache[
prev_char.char + pt_char.char, prev_char.style]
else: # char_width < 0
# (Should not happen.)
char_width = 0
# .. note:: We can't use :meth:`cursor_forward()`, because that
# way, we'll never know when to linefeed.
cursor_position_x += char_width
# Update max_y. (Don't use 'max()' for comparing only two values, that
# is less efficient.)
if cursor_position_y > self.max_y:
self.max_y = cursor_position_y
cursor_position.x = cursor_position_x
def carriage_return(self):
" Move the cursor to the beginning of the current line. "
self.pt_cursor_position.x = 0
def index(self):
"""Move the cursor down one line in the same column. If the
cursor is at the last line, create a new line at the bottom.
"""
margins = self.margins
# When scrolling over the full screen height -> keep history.
if margins is None:
# Simply move the cursor one position down.
cursor_position = self.pt_cursor_position
cursor_position.y += 1
self.max_y = max(self.max_y, cursor_position.y)
# Cleanup the history, but only every 100 calls.
self._history_cleanup_counter += 1
if self._history_cleanup_counter == 100:
self._remove_old_lines_from_history()
self._history_cleanup_counter = 0
else:
# Move cursor down, but scroll in the scrolling region.
top, bottom = self.margins
line_offset = self.line_offset
if self.pt_cursor_position.y - line_offset == bottom:
data_buffer = self.data_buffer
for line in range(top, bottom):
data_buffer[line + line_offset] = \
data_buffer[line + line_offset + 1]
data_buffer.pop(line + line_offset + 1, None)
else:
self.cursor_down()
def _remove_old_lines_from_history(self):
"""
Remove top from the scroll buffer. (Outside bounds of history limit.)
"""
remove_above = max(0, self.pt_cursor_position.y - self.get_history_limit())
data_buffer = self.pt_screen.data_buffer
for line in list(data_buffer):
if line < remove_above:
data_buffer.pop(line, None)
def clear_history(self):
"""
Delete all history from the scroll buffer.
"""
for line in list(self.data_buffer):
if line < self.line_offset:
self.data_buffer.pop(line, None)
def reverse_index(self):
margins = self.margins or Margins(0, self.lines - 1)
top, bottom = margins
line_offset = self.line_offset
# When scrolling over the full screen -> keep history.
if self.pt_cursor_position.y - line_offset == top:
for i in range(bottom - 1, top - 1, -1):
self.data_buffer[i + line_offset + 1] = self.data_buffer[i + line_offset]
self.data_buffer.pop(i + line_offset, None)
else:
self.cursor_up()
def linefeed(self):
"""Performs an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
self.carriage_return()
def next_line(self):
""" When `EscE` has been received. Go to the next line, even when LNM has
not been set. """
self.index()
self.carriage_return()
self.ensure_bounds()
def tab(self):
"""Move to the next tab space, or the end of the screen if there
aren't anymore left.
"""
for stop in sorted(self.tabstops):
if self.pt_cursor_position.x < stop:
column = stop
break
else:
column = self.columns - 1
self.pt_cursor_position.x = column
def backspace(self):
"""Move cursor to the left one or keep it in it's position if
it's at the beginning of the line already.
"""
self.cursor_back()
def save_cursor(self):
"""Push the current cursor position onto the stack."""
self.savepoints.append(_Savepoint(
self.pt_cursor_position.x,
self.pt_cursor_position.y,
self.g0_charset,
self.g1_charset,
self.charset,
mo.DECOM in self.mode,
mo.DECAWM in self.mode,
self._attrs,
self._style_str))
def restore_cursor(self):
"""Set the current cursor position to whatever cursor is on top
of the stack.
"""
if self.savepoints:
savepoint = self.savepoints.pop()
self.g0_charset = savepoint.g0_charset
self.g1_charset = savepoint.g1_charset
self.charset = savepoint.charset
self._attrs = savepoint.attrs
self._style_str = savepoint.style_str
if savepoint.origin:
self.set_mode(mo.DECOM)
if savepoint.wrap:
self.set_mode(mo.DECAWM)
self.pt_cursor_position.x = savepoint.cursor_x
self.pt_cursor_position.y = savepoint.cursor_y
self.ensure_bounds(use_margins=True)
else:
# If nothing was saved, the cursor moves to home position;
# origin mode is reset. :todo: DECAWM?
self.reset_mode(mo.DECOM)
self.cursor_position()
def insert_lines(self, count=None):
"""Inserts the indicated # of lines at line with cursor. Lines
displayed **at** and below the cursor move down. Lines moved
past the bottom margin are lost.
:param count: number of lines to insert.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
data_buffer = self.data_buffer
line_offset = self.line_offset
pt_cursor_position = self.pt_cursor_position
# If the cursor is outside the scrolling margins, do nothing.
if top <= pt_cursor_position.y - self.line_offset <= bottom:
for line in range(bottom, pt_cursor_position.y - line_offset, -1):
if line - count < top:
data_buffer.pop(line + line_offset, None)
else:
data_buffer[line + line_offset] = data_buffer[line + line_offset - count]
data_buffer.pop(line + line_offset - count, None)
self.carriage_return()
def delete_lines(self, count=None):
"""Deletes the indicated # of lines, starting at line with
cursor. As lines are deleted, lines displayed below cursor
move up. Lines added to bottom of screen have spaces with same
character attributes as last line moved up.
:param int count: number of lines to delete.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
line_offset = self.line_offset
pt_cursor_position = self.pt_cursor_position
# If the cursor is outside the scrolling margins, do nothing.
if top <= pt_cursor_position.y - line_offset <= bottom:
data_buffer = self.data_buffer
# Iterate from the cursor Y position until the end of the visible input.
for line in range(pt_cursor_position.y - line_offset, bottom + 1):
# When 'x' lines further are out of the margins, replace by an empty line,
# Otherwise copy the line from there.
if line + count > bottom:
data_buffer.pop(line + line_offset, None)
else:
data_buffer[line + line_offset] = self.data_buffer[line + count + line_offset]
def insert_characters(self, count=None):
"""Inserts the indicated # of blank characters at the cursor
position. The cursor does not move and remains at the beginning
of the inserted blank characters. Data on the line is shifted
forward.
:param int count: number of characters to insert.
"""
count = count or 1
line = self.data_buffer[self.pt_cursor_position.y]
if line:
max_columns = max(line.keys())
for i in range(max_columns, self.pt_cursor_position.x - 1, -1):
line[i + count] = line[i]
del line[i]
def delete_characters(self, count=None):
count = count or 1
line = self.data_buffer[self.pt_cursor_position.y]
if line:
max_columns = max(line.keys())
for i in range(self.pt_cursor_position.x, max_columns + 1):
line[i] = line[i + count]
del line[i + count]
def cursor_position(self, line=None, column=None):
"""Set the cursor to a specific `line` and `column`.
Cursor is allowed to move out of the scrolling region only when
:data:`~pyte.modes.DECOM` is reset, otherwise -- the position
doesn't change.
:param int line: line number to move the cursor to.
:param int column: column number to move the cursor to.
"""
column = (column or 1) - 1
line = (line or 1) - 1
# If origin mode (DECOM) is set, line numbers are relative to
# the top scrolling margin.
margins = self.margins
if margins is not None and mo.DECOM in self.mode:
line += margins.top
# Cursor is not allowed to move out of the scrolling region.
if not (margins.top <= line <= margins.bottom):
return
self.pt_cursor_position.x = column
self.pt_cursor_position.y = line + self.line_offset
self.ensure_bounds()
def cursor_to_column(self, column=None):
"""Moves cursor to a specific column in the current line.
:param int column: column number to move the cursor to.
"""
self.pt_cursor_position.x = (column or 1) - 1
self.ensure_bounds()
def cursor_to_line(self, line=None):
"""Moves cursor to a specific line in the current column.
:param int line: line number to move the cursor to.
"""
self.pt_cursor_position.y = (line or 1) - 1 + self.line_offset
# If origin mode (DECOM) is set, line numbers are relative to
# the top scrolling margin.
margins = self.margins
if mo.DECOM in self.mode and margins is not None:
self.pt_cursor_position.y += margins.top
# FIXME: should we also restrict the cursor to the scrolling
# region?
self.ensure_bounds()
def bell(self, *args):
" Bell "
self.bell_func()
def cursor_down(self, count=None):
"""Moves cursor down the indicated # of lines in same column.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
cursor_position = self.pt_cursor_position
margins = self.margins or Margins(0, self.lines - 1)
# Ensure bounds.
# (Following code is faster than calling `self.ensure_bounds`.)
_, bottom = margins
cursor_position.y = min(cursor_position.y + (count or 1),
bottom + self.line_offset + 1)
self.max_y = max(self.max_y, cursor_position.y)
def cursor_down1(self, count=None):
"""Moves cursor down the indicated # of lines to column 1.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
self.cursor_down(count)
self.carriage_return()
def cursor_up(self, count=None):
"""Moves cursor up the indicated # of lines in same column.
Cursor stops at top margin.
:param int count: number of lines to skip.
"""
self.pt_cursor_position.y -= count or 1
self.ensure_bounds(use_margins=True)
def cursor_up1(self, count=None):
"""Moves cursor up the indicated # of lines to column 1. Cursor
stops at bottom margin.
:param int count: number of lines to skip.
"""
self.cursor_up(count)
self.carriage_return()
def cursor_back(self, count=None):
"""Moves cursor left the indicated # of columns. Cursor stops
at left margin.
:param int count: number of columns to skip.
"""
self.pt_cursor_position.x = max(
0, self.pt_cursor_position.x - (count or 1))
self.ensure_bounds()
def cursor_forward(self, count=None):
"""Moves cursor right the indicated # of columns. Cursor stops
at right margin.
:param int count: number of columns to skip.
"""
self.pt_cursor_position.x += count or 1
self.ensure_bounds()
def erase_characters(self, count=None):
"""Erases the indicated # of characters, starting with the
character at cursor position. Character attributes are set to the
cursor attributes. The cursor remains in the same position.
:param int count: number of characters to erase.
.. warning::
Even though *ALL* of the VTXXX manuals state that character
attributes **should be reset to defaults**, ``libvte``,
``xterm`` and ``ROTE`` completely ignore this. Same applies
to all ``erase_*()`` and ``delete_*()`` methods.
"""
count = count or 1
cursor_position = self.pt_cursor_position
row = self.data_buffer[cursor_position.y]
for column in range(cursor_position.x,
min(cursor_position.x + count, self.columns)):
row[column] = Char(style=row[column].style)
def erase_in_line(self, type_of=0, private=False):
"""Erases a line in a specific way.
:param int type_of: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of line, including cursor
position.
* ``1`` -- Erases from beginning of line to cursor,
including cursor position.
* ``2`` -- Erases complete line.
:param bool private: when ``True``, character attributes are left
unchanged (**not implemented**).
"""
data_buffer = self.data_buffer
pt_cursor_position = self.pt_cursor_position
if type_of == 2:
# Delete line completely.
data_buffer.pop(pt_cursor_position.y, None)
else:
line = data_buffer[pt_cursor_position.y]
def should_we_delete(column): # TODO: check for off-by-one errors!
if type_of == 0:
return column >= pt_cursor_position.x
if type_of == 1:
return column <= pt_cursor_position.x
for column in list(line.keys()):
if should_we_delete(column):
line.pop(column, None)
def erase_in_display(self, type_of=0, private=False):
"""Erases display in a specific way.
:param int type_of: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of screen, including
cursor position.
* ``1`` -- Erases from beginning of screen to cursor,
including cursor position.
* ``2`` -- Erases complete display. All lines are erased
and changed to single-width. Cursor does not move.
* ``3`` -- Erase saved lines. (Xterm) Clears the history.
:param bool private: when ``True``, character attributes are left
unchanged (**not implemented**).
"""
line_offset = self.line_offset
pt_cursor_position = self.pt_cursor_position
try:
max_line = max(self.pt_screen.data_buffer)
except ValueError:
# max() called on empty sequence. Screen is empty. Nothing to erase.
return
if type_of == 3:
# Clear data buffer.
for y in list(self.data_buffer):
self.data_buffer.pop(y, None)
# Reset line_offset.
pt_cursor_position.y = 0
self.max_y = 0
else:
try:
interval = (
# a) erase from cursor to the end of the display, including
# the cursor,
range(pt_cursor_position.y + 1, max_line + 1),
# b) erase from the beginning of the display to the cursor,
# including it,
range(line_offset, pt_cursor_position.y),
# c) erase the whole display.
range(line_offset, max_line + 1)
)[type_of]
except IndexError:
return
data_buffer = self.data_buffer
for line in interval:
data_buffer[line] = defaultdict(lambda: Char(' '))
# In case of 0 or 1 we have to erase the line with the cursor.
if type_of in [0, 1]:
self.erase_in_line(type_of)
def set_tab_stop(self):
" Set a horizontal tab stop at cursor position. "
self.tabstops.add(self.pt_cursor_position.x)
def clear_tab_stop(self, type_of=None):
"""Clears a horizontal tab stop in a specific way, depending
on the ``type_of`` value:
* ``0`` or nothing -- Clears a horizontal tab stop at cursor
position.
* ``3`` -- Clears all horizontal tab stops.
"""
if not type_of:
# Clears a horizontal tab stop at cursor position, if it's
# present, or silently fails if otherwise.
self.tabstops.discard(self.pt_cursor_position.x)
elif type_of == 3:
self.tabstops = set() # Clears all horizontal tab stops.
def ensure_bounds(self, use_margins=None):
"""Ensure that current cursor position is within screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
cursor is bounded by top and bottom
margins, instead of ``[0; lines - 1]``.
"""
margins = self.margins
if margins and (use_margins or mo.DECOM in self.mode):
top, bottom = margins
else:
top, bottom = 0, self.lines - 1
cursor_position = self.pt_cursor_position
line_offset = self.line_offset
cursor_position.x = min(max(0, cursor_position.x), self.columns - 1)
cursor_position.y = min(max(top + line_offset, cursor_position.y),
bottom + line_offset + 1)
def alignment_display(self):
for y in range(0, self.lines):
line = self.data_buffer[y + self.line_offset]
for x in range(0, self.columns):
line[x] = Char('E')
# Mapping of the ANSI color codes to their names.
_fg_colors = dict((v, '#' + k) for k, v in FG_ANSI_COLORS.items())
_bg_colors = dict((v, '#' + k) for k, v in BG_ANSI_COLORS.items())
# Mapping of the escape codes for 256colors to their '#ffffff' value.
_256_colors = {}
for i, (r, g, b) in enumerate(_256_colors_table.colors):
_256_colors[1024 + i] = '#%02x%02x%02x' % (r, g, b)
def select_graphic_rendition(self, *attrs):
""" Support 256 colours """
replace = {}
if not attrs:
attrs = [0]
else:
attrs = list(attrs[::-1])
while attrs:
attr = attrs.pop()
if attr in self._fg_colors:
replace["color"] = self._fg_colors[attr]
elif attr in self._bg_colors:
replace["bgcolor"] = self._bg_colors[attr]
elif attr == 1:
replace["bold"] = True
elif attr == 3:
replace["italic"] = True
elif attr == 4:
replace["underline"] = True
elif attr == 5:
replace["blink"] = True
elif attr == 6:
replace["blink"] = True # Fast blink.
elif attr == 7:
replace["reverse"] = True
elif attr == 8:
replace["hidden"] = True
elif attr == 22:
replace["bold"] = False
elif attr == 23:
replace["italic"] = False
elif attr == 24:
replace["underline"] = False
elif attr == 25:
replace["blink"] = False
elif attr == 27:
replace["reverse"] = False
elif not attr:
replace = {}
self._attrs = Attrs(
color=None, bgcolor=None, bold=False, underline=False,
italic=False, blink=False, reverse=False, hidden=False)
elif attr in (38, 48):
n = attrs.pop()
# 256 colors.
if n == 5:
if attr == 38:
m = attrs.pop()
replace["color"] = self._256_colors.get(1024 + m)
elif attr == 48:
m = attrs.pop()
replace["bgcolor"] = self._256_colors.get(1024 + m)
# True colors.
if n == 2:
try:
color_str = '#%02x%02x%02x' % (attrs.pop(), attrs.pop(), attrs.pop())
except IndexError:
pass
else:
if attr == 38:
replace["color"] = color_str
elif attr == 48:
replace["bgcolor"] = color_str
attrs = self._attrs._replace(**replace)
# Build style string.
style_str = ''
if attrs.color:
style_str += '%s ' % attrs.color
if attrs.bgcolor:
style_str += 'bg:%s ' % attrs.bgcolor
if attrs.bold:
style_str += 'bold '
if attrs.italic:
style_str += 'italic '
if attrs.underline:
style_str += 'underline '
if attrs.blink:
style_str += 'blink '
if attrs.reverse:
style_str += 'reverse '
if attrs.hidden:
style_str += 'hidden '
self._style_str = unicode_intern(style_str)
self._attrs = attrs
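# For example (attribute numbers following the usual SGR conventions):
#   select_graphic_rendition(1, 31)            -> bold + a basic ANSI
#                                                 foreground (31 is looked up
#                                                 in _fg_colors)
#   select_graphic_rendition(38, 5, 196)       -> 256-colour foreground from
#                                                 _256_colors[1024 + 196]
#   select_graphic_rendition(38, 2, 255, 0, 0) -> true-color '#ff0000'
#   select_graphic_rendition()                 -> reset to the default Attrs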
def report_device_status(self, data):
"""
Report cursor position.
"""
if data == 6:
y = self.pt_cursor_position.y - self.line_offset + 1
x = self.pt_cursor_position.x + 1
response = '\x1b[%i;%iR' % (y, x)
self.write_process_input(response)
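# i.e. a Cursor Position Report: when the application writes "\x1b[6n", the
# terminal answers with "\x1b[<row>;<col>R", both 1-based and relative to
# the visible screen (hence the line_offset correction above).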
def report_device_attributes(self, *args, **kwargs):
response = '\x1b[>84;0;0c'
self.write_process_input(response)
def set_icon_name(self, param):
self.icon_name = param
def set_title(self, param):
self.title = param
def define_charset(self, *a, **kw):
pass
def charset_default(self, *a, **kw):
" Not implemented. "
def charset_utf8(self, *a, **kw):
" Not implemented. "
def debug(self, *args, **kwargs):
pass
def _reflow(self):
"""
Reflow the screen using the given width.
"""
width = self.columns
data_buffer = self.pt_screen.data_buffer
new_data_buffer = Screen(default_char=Char(' ', '')).data_buffer
cursor_position = self.pt_screen.cursor_position
cy, cx = (cursor_position.y, cursor_position.x)
cursor_character = data_buffer[cursor_position.y][cursor_position.x].char
# Ensure that the cursor position is present.
# (and avoid calling min() on empty collection.)
data_buffer[cursor_position.y][cursor_position.x]
# Unwrap all the lines.
offset = min(data_buffer)
line = []
all_lines = [line]
for row_index in range(min(data_buffer), max(data_buffer) + 1):
row = data_buffer[row_index]
row[0] # Avoid calling max() on empty collection.
for column_index in range(0, max(row) + 1):
if cy == row_index and cx == column_index:
cy = len(all_lines) - 1
cx = len(line)
line.append(row[column_index])
# Create new line if the next line was not a wrapped line.
if row_index + 1 not in self.wrapped_lines:
line = []
all_lines.append(line)
# Remove trailing whitespace (unless it contains the cursor).
# Also make sure that lines consist of at least one character,
# otherwise we can't calculate `max_y` correctly. (This is important
# for the `clear` command.)
for row_index, line in enumerate(all_lines):
# We do this only if no special styling given.
while len(line) > 1 and line[-1].char.isspace() and not line[-1].style:
if row_index == cy and len(line) - 1 == cx:
break
line.pop()
# Wrap lines again according to the screen width.
new_row_index = offset
new_column_index = 0
new_wrapped_lines = []
for row_index, line in enumerate(all_lines):
for column_index, char in enumerate(line):
# Check for space on the current line.
if new_column_index + char.width > width:
new_row_index += 1
new_column_index = 0
new_wrapped_lines.append(new_row_index)
if cy == row_index and cx == column_index:
cy = new_row_index
cx = new_column_index
# Add character to new buffer.
new_data_buffer[new_row_index][new_column_index] = char
new_column_index += char.width
new_row_index += 1
new_column_index = 0
# TODO: when the window gets smaller, and the cursor is at the top of the screen,
# remove lines at the bottom.
for row_index in range(min(data_buffer), max(data_buffer) + 1):
if row_index > cy + self.lines:
del data_buffer[row_index]
self.pt_screen.data_buffer = new_data_buffer
self.data_buffer = new_data_buffer
self.wrapped_lines = new_wrapped_lines
cursor_position.y, cursor_position.x = cy, cx
self.pt_screen.cursor_position = cursor_position # XXX: not needed.
self.pt_cursor_position = self.pt_screen.cursor_position
# If everything goes well, the cursor should still be on the same character.
if cursor_character != new_data_buffer[cursor_position.y][cursor_position.x].char:
# FIXME:
raise Exception('Reflow failed: %r %r' % (cursor_character, new_data_buffer[cursor_position.y][cursor_position.x].char))
self.max_y = max(self.data_buffer)
self.max_y = min(
self.max_y,
cursor_position.y + self.lines - 1)
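# Editor's sketch (standalone, not part of the class above): the same
# unwrap-then-rewrap idea that _reflow applies to the screen's data_buffer,
# shown on plain strings. `rows` is a list of visual rows and `wrapped_rows`
# the set of row indices that continue the previous row; both names are
# illustrative assumptions mirroring data_buffer/wrapped_lines above.
def _reflow_strings_sketch(rows, wrapped_rows, new_width):
    # Unwrap: join every row with the rows marked as its continuations.
    logical_lines, current = [], ''
    for index, row in enumerate(rows):
        current += row
        if index + 1 not in wrapped_rows:
            logical_lines.append(current)
            current = ''
    # Rewrap: break each logical line at the new width again.
    reflowed = []
    for line in logical_lines:
        pieces = [line[i:i + new_width] for i in range(0, len(line), new_width)]
        reflowed.extend(pieces or [''])
    return reflowed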
|
|
"""SCons.Tool.FortranCommon
Stuff for processing Fortran, common to all fortran dialects.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import re
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
def isfortran(env, source):
"""Return 1 if any of code in source has fortran files in it, 0
otherwise."""
try:
fsuffixes = env['FORTRANSUFFIXES']
except KeyError:
# If no FORTRANSUFFIXES, no fortran tool, so there is no need to look
# for fortran sources.
return 0
if not source:
# Source might be None for unusual cases like SConf.
return 0
for s in source:
if s.sources:
ext = os.path.splitext(str(s.sources[0]))[1]
if ext in fsuffixes:
return 1
return 0
def _fortranEmitter(target, source, env):
node = source[0].rfile()
if not node.exists() and not node.is_derived():
print "Could not locate " + str(node.name)
return ([], [])
mod_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
cre = re.compile(mod_regex,re.M)
    # Retrieve all defined module names
    modules = cre.findall(node.get_text_contents())
    # Remove duplicate items from the list
    modules = SCons.Util.unique(modules)
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX', target=target, source=source)
moddir = env.subst('$FORTRANMODDIR', target=target, source=source)
modules = [x.lower() + suffix for x in modules]
for m in modules:
target.append(env.fs.File(m, moddir))
return (target, source)
def FortranEmitter(target, source, env):
target, source = _fortranEmitter(target, source, env)
return SCons.Defaults.StaticObjectEmitter(target, source, env)
def ShFortranEmitter(target, source, env):
target, source = _fortranEmitter(target, source, env)
return SCons.Defaults.SharedObjectEmitter(target, source, env)
def ComputeFortranSuffixes(suffixes, ppsuffixes):
"""suffixes are fortran source files, and ppsuffixes the ones to be
pre-processed. Both should be sequences, not strings."""
assert len(suffixes) > 0
s = suffixes[0]
sup = s.upper()
upper_suffixes = [_.upper() for _ in suffixes]
if SCons.Util.case_sensitive_suffixes(s, sup):
ppsuffixes.extend(upper_suffixes)
else:
suffixes.extend(upper_suffixes)
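# Editor's note: a small illustration of ComputeFortranSuffixes(). The sample
# suffix lists are arbitrary; the point is that on case-sensitive platforms the
# upper-case variants are treated as needing pre-processing, while on
# case-insensitive platforms they are simply more Fortran suffixes.
def _example_compute_fortran_suffixes():
    suffixes, ppsuffixes = ['.f', '.for'], ['.fpp']
    ComputeFortranSuffixes(suffixes, ppsuffixes)
    # case-sensitive:   suffixes == ['.f', '.for'], ppsuffixes == ['.fpp', '.F', '.FOR']
    # case-insensitive: suffixes == ['.f', '.for', '.F', '.FOR'], ppsuffixes == ['.fpp']
    return suffixes, ppsuffixes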
def CreateDialectActions(dialect):
"""Create dialect specific actions."""
CompAction = SCons.Action.Action('$%sCOM ' % dialect, '$%sCOMSTR' % dialect)
CompPPAction = SCons.Action.Action('$%sPPCOM ' % dialect, '$%sPPCOMSTR' % dialect)
ShCompAction = SCons.Action.Action('$SH%sCOM ' % dialect, '$SH%sCOMSTR' % dialect)
ShCompPPAction = SCons.Action.Action('$SH%sPPCOM ' % dialect, '$SH%sPPCOMSTR' % dialect)
return CompAction, CompPPAction, ShCompAction, ShCompPPAction
def DialectAddToEnv(env, dialect, suffixes, ppsuffixes, support_module = 0):
"""Add dialect specific construction variables."""
ComputeFortranSuffixes(suffixes, ppsuffixes)
fscan = SCons.Scanner.Fortran.FortranScan("%sPATH" % dialect)
for suffix in suffixes + ppsuffixes:
SCons.Tool.SourceFileScanner.add_scanner(suffix, fscan)
env.AppendUnique(FORTRANSUFFIXES = suffixes + ppsuffixes)
compaction, compppaction, shcompaction, shcompppaction = \
CreateDialectActions(dialect)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in suffixes:
static_obj.add_action(suffix, compaction)
shared_obj.add_action(suffix, shcompaction)
static_obj.add_emitter(suffix, FortranEmitter)
shared_obj.add_emitter(suffix, ShFortranEmitter)
for suffix in ppsuffixes:
static_obj.add_action(suffix, compppaction)
shared_obj.add_action(suffix, shcompppaction)
static_obj.add_emitter(suffix, FortranEmitter)
shared_obj.add_emitter(suffix, ShFortranEmitter)
if '%sFLAGS' % dialect not in env:
env['%sFLAGS' % dialect] = SCons.Util.CLVar('')
if 'SH%sFLAGS' % dialect not in env:
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS' % dialect)
# If a tool does not define fortran prefix/suffix for include path, use C ones
if 'INC%sPREFIX' % dialect not in env:
env['INC%sPREFIX' % dialect] = '$INCPREFIX'
if 'INC%sSUFFIX' % dialect not in env:
env['INC%sSUFFIX' % dialect] = '$INCSUFFIX'
env['_%sINCFLAGS' % dialect] = '$( ${_concat(INC%sPREFIX, %sPATH, INC%sSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)' % (dialect, dialect, dialect)
if support_module == 1:
env['%sCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
env['%sPPCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
env['SH%sCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
else:
env['%sCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
env['%sPPCOM' % dialect] = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
env['SH%sCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
def add_fortran_to_env(env):
"""Add Builders and construction variables for Fortran to an Environment."""
try:
FortranSuffixes = env['FORTRANFILESUFFIXES']
except KeyError:
FortranSuffixes = ['.f', '.for', '.ftn']
#print "Adding %s to fortran suffixes" % FortranSuffixes
try:
FortranPPSuffixes = env['FORTRANPPFILESUFFIXES']
except KeyError:
FortranPPSuffixes = ['.fpp', '.FPP']
DialectAddToEnv(env, "FORTRAN", FortranSuffixes,
FortranPPSuffixes, support_module = 1)
env['FORTRANMODPREFIX'] = '' # like $LIBPREFIX
env['FORTRANMODSUFFIX'] = '.mod' # like $LIBSUFFIX
env['FORTRANMODDIR'] = '' # where the compiler should place .mod files
env['FORTRANMODDIRPREFIX'] = '' # some prefix to $FORTRANMODDIR - similar to $INCPREFIX
env['FORTRANMODDIRSUFFIX'] = '' # some suffix to $FORTRANMODDIR - similar to $INCSUFFIX
env['_FORTRANMODFLAG'] = '$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
def add_f77_to_env(env):
"""Add Builders and construction variables for f77 to an Environment."""
try:
F77Suffixes = env['F77FILESUFFIXES']
except KeyError:
F77Suffixes = ['.f77']
#print "Adding %s to f77 suffixes" % F77Suffixes
try:
F77PPSuffixes = env['F77PPFILESUFFIXES']
except KeyError:
F77PPSuffixes = []
DialectAddToEnv(env, "F77", F77Suffixes, F77PPSuffixes)
def add_f90_to_env(env):
"""Add Builders and construction variables for f90 to an Environment."""
try:
F90Suffixes = env['F90FILESUFFIXES']
except KeyError:
F90Suffixes = ['.f90']
#print "Adding %s to f90 suffixes" % F90Suffixes
try:
F90PPSuffixes = env['F90PPFILESUFFIXES']
except KeyError:
F90PPSuffixes = []
DialectAddToEnv(env, "F90", F90Suffixes, F90PPSuffixes,
support_module = 1)
def add_f95_to_env(env):
"""Add Builders and construction variables for f95 to an Environment."""
try:
F95Suffixes = env['F95FILESUFFIXES']
except KeyError:
F95Suffixes = ['.f95']
#print "Adding %s to f95 suffixes" % F95Suffixes
try:
F95PPSuffixes = env['F95PPFILESUFFIXES']
except KeyError:
F95PPSuffixes = []
DialectAddToEnv(env, "F95", F95Suffixes, F95PPSuffixes,
support_module = 1)
def add_f03_to_env(env):
"""Add Builders and construction variables for f03 to an Environment."""
try:
F03Suffixes = env['F03FILESUFFIXES']
except KeyError:
F03Suffixes = ['.f03']
#print "Adding %s to f95 suffixes" % F95Suffixes
try:
F03PPSuffixes = env['F03PPFILESUFFIXES']
except KeyError:
F03PPSuffixes = []
DialectAddToEnv(env, "F03", F03Suffixes, F03PPSuffixes,
support_module = 1)
def add_all_to_env(env):
"""Add builders and construction variables for all supported fortran
dialects."""
add_fortran_to_env(env)
add_f77_to_env(env)
add_f90_to_env(env)
add_f95_to_env(env)
add_f03_to_env(env)
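# Editor's sketch: how a dialect tool module would typically build on the
# helpers above from its generate() function. The compiler name 'gfortran' and
# the '-fPIC' shared-object flag are illustrative assumptions only.
def _example_tool_generate(env):
    add_all_to_env(env)
    for dialect in ('FORTRAN', 'F77', 'F90', 'F95', 'F03'):
        env[dialect] = 'gfortran'
        env['SH%s' % dialect] = '$%s' % dialect
        env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)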
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# coding: utf-8
# In[1]:
# The PCA approach seems to fail in bright regions. As you expect...
# In[2]:
# imports
import os
from shfi_otf_pipeline import make_apex_cubes
import time # for performance analysis
from astropy import units as u
import FITS_tools
import image_tools
import numpy as np
# for bigger figures
import pylab as pl
pl.rcParams['figure.figsize'] = (12.0, 10.0)
remake_cubes = False
if remake_cubes:
make_apex_cubes.build_cube_2013(datasets=['M-091.F-0019-2013-2013-06-08',],
lowhigh='high',
pca_clean=True,
pcakwargs={'smoothing_scale':25.,},
extra_suffix='_3PCA')
# In[4]:
make_apex_cubes.build_cube_2013(datasets=['M-091.F-0019-2013-2013-06-08',],
lowhigh='high',
pca_clean=False,
extra_suffix='_noPCA')
# In[5]:
make_apex_cubes.build_cube_2013(datasets=['M-091.F-0019-2013-2013-06-08',],
lowhigh='high',
pca_clean=True,
pcakwargs={'smoothing_scale':25.,
'ncomponents':1},
extra_suffix='_1PCA')
# In[6]:
import spectral_cube
from astropy.io import fits
import aplpy
# In[7]:
dpath = make_apex_cubes.june2013path
cube = spectral_cube.SpectralCube.read(os.path.join(dpath,'APEX_H2CO_2013_high_noPCA_sub.fits'))
h2cocube = cube.with_spectral_unit(u.km/u.s, rest_value=218.22219*u.GHz, velocity_convention='radio')
h2cocubecut = h2cocube.spectral_slab(15*u.km/u.s, 65*u.km/u.s)
inth2co = h2cocubecut.moment0()
peakh2co = fits.PrimaryHDU(h2cocubecut.max(axis=0).value, header=inth2co.hdu.header)
# In[8]:
F = aplpy.FITSFigure(inth2co.hdu)
F._ax1.set_title("Integrated h2co")
F.show_grayscale()
F.add_colorbar()
F = aplpy.FITSFigure(peakh2co)
F._ax1.set_title("Peak h2co")
F.show_grayscale()
F.add_colorbar()
# In[9]:
pcacube = spectral_cube.SpectralCube.read(os.path.join(dpath,'APEX_H2CO_2013_high_3PCA_sub.fits'))
h2copcacube = pcacube.with_spectral_unit(u.km/u.s, rest_value=218.22219*u.GHz, velocity_convention='radio')
h2copcacubecut = h2copcacube.spectral_slab(15*u.km/u.s, 65*u.km/u.s)
pcainth2co = h2copcacubecut.moment0()
pcapeakh2co = fits.PrimaryHDU(h2copcacubecut.max(axis=0).value, header=pcainth2co.hdu.header)
# In[10]:
F = aplpy.FITSFigure(pcainth2co.hdu)
F._ax1.set_title("Integrated h2co")
F.show_grayscale()
F.add_colorbar()
F = aplpy.FITSFigure(pcapeakh2co)
F._ax1.set_title("Peak h2co")
F.show_grayscale()
F.add_colorbar()
# In[11]:
pca1cube = spectral_cube.SpectralCube.read(os.path.join(dpath,'APEX_H2CO_2013_high_1PCA_sub.fits'))
h2copca1cube = pca1cube.with_spectral_unit(u.km/u.s, rest_value=218.22219*u.GHz, velocity_convention='radio')
h2copca1cubecut = h2copca1cube.spectral_slab(15*u.km/u.s, 65*u.km/u.s)
pca1inth2co = h2copca1cubecut.moment0()
pca1peakh2co = fits.PrimaryHDU(h2copca1cubecut.max(axis=0).value, header=pca1inth2co.hdu.header)
# In[12]:
F = aplpy.FITSFigure(pca1inth2co.hdu)
F._ax1.set_title("Integrated h2co")
F.show_grayscale()
F.add_colorbar()
F = aplpy.FITSFigure(pca1peakh2co)
F._ax1.set_title("Peak h2co")
F.show_grayscale()
F.add_colorbar()
# In[13]:
diffh2co = fits.PrimaryHDU(inth2co.hdu.data - pcainth2co.hdu.data, header=inth2co.hdu.header)
diffh2co2 = fits.PrimaryHDU(inth2co.hdu.data - pca1inth2co.hdu.data, header=inth2co.hdu.header)
# In[14]:
fig = pl.figure(figsize=(16,8))
subplot_diff2 = 0.4
subplot_bounds2 = [0.05,0.5]
subplot_diff3 = 0.25
subplot_bounds3 = [0.05,0.35,0.65]
subplot_diff4 = 0.22
subplot_bounds4 = [0.05,0.275,0.5,0.725]
F1 = aplpy.FITSFigure(inth2co.hdu, subplot=[0.05,0.1,0.25,0.8], figure=fig)
F2 = aplpy.FITSFigure(pcainth2co.hdu, subplot=[0.35,0.1,0.25,0.8], figure=fig)
F3 = aplpy.FITSFigure(pca1inth2co.hdu, subplot=[0.65,0.1,0.25,0.8], figure=fig)
for F in (F1,F2,F3):
F.show_grayscale()
F.add_colorbar()
F.set_tick_labels_format('d.dd','d.dd')
F.set_tick_xspacing(0.1)
for F in (F2,F3,):
F.tick_labels.hide_y()
F.axis_labels.hide_y()
F1._ax1.set_title("Before PCA")
F2._ax1.set_title("After PCA-3")
F3._ax1.set_title("After PCA-1")
fig.savefig("Before_vs_After_PCA_clean_3panel.pdf", bbox_inches='tight', dpi=144)
# In[15]:
fig = pl.figure(figsize=(16,8))
F1 = aplpy.FITSFigure(diffh2co, subplot=[0.05,0.1,0.4,0.8], figure=fig)
F2 = aplpy.FITSFigure(diffh2co2, subplot=[0.5,0.1,0.4,0.8], figure=fig)
for F in (F1,F2,):
F.show_grayscale()
F.add_colorbar()
F.set_tick_labels_format('d.dd','d.dd')
F.set_tick_xspacing(0.1)
for F in (F2,):
F.tick_labels.hide_y()
F.axis_labels.hide_y()
F1._ax1.set_title("raw-PCA3")
F2._ax1.set_title("raw-PCA1")
fig.savefig("PCA_Diffs_3pca_1pca_2panel.pdf", bbox_inches='tight', dpi=144)
# In[16]:
spec = h2cocube[:,40:60,100:120].moment0(axis=2).mean(axis=1)
pcaspec = pcacube[:,40:60,100:120].moment0(axis=2).mean(axis=1)
pca1spec = pca1cube[:,40:60,100:120].moment0(axis=2).mean(axis=1)
pl.figure()
pl.subplot(2,2,1)
pl.plot(spec.value, linewidth=0.5)
pl.plot((spec-pcaspec).value)
pl.ylim(-0.005,0.007)
pl.xlim(1500,2500)
pl.subplot(2,2,3)
pl.plot(spec.value, linewidth=0.5)
pl.plot((spec-pca1spec).value)
pl.ylim(-0.005,0.007)
pl.xlim(1500,2500)
#pl.subplot(3,2,5)
#pl.plot(spec.value, linewidth=0.5)
#pl.plot((spec-timepcaspec).value)
#pl.ylim(-0.005,0.007)
#pl.xlim(1500,2500)
pl.subplot(2,2,2)
pl.plot(spec.value, linewidth=0.5)
pl.plot((pcaspec).value, alpha=0.5)
pl.ylim(-0.005,0.007)
pl.xlim(1500,2500)
pl.subplot(2,2,4)
pl.plot(spec.value, linewidth=0.5)
pl.plot((pca1spec).value, alpha=0.5)
pl.ylim(-0.005,0.007)
pl.xlim(1500,2500)
#pl.subplot(3,2,6)
#pl.plot(spec.value, linewidth=0.5)
#pl.plot((timepcaspec).value, alpha=0.5)
#pl.ylim(-0.005,0.007)
#pl.xlim(1500,2500)
|
|
"""
This module provides `sunpy.timeseries.GenericTimeSeries` which all other
`sunpy.timeseries.TimeSeries` classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import astropy
import astropy.units as u
from astropy.table import Column, Table
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.exceptions import SunpyUserWarning
from sunpy.util.metadata import MetaDict
from sunpy.visualization import peek_show
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
__all__ = ["GenericTimeSeries"]
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A `pandas.DataFrame` representing one or more fields as a function of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
Defaults to `None`.
units : `dict`, optional
A mapping from column names in ``data`` to the physical units of that column.
Defaults to `None`.
Attributes
----------
data : `~pandas.DataFrame`
A `pandas.DataFrame` representing one or more fields as a function of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : `dict`
        A mapping from column names in ``data`` to the physical units of that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> times = parse_time("now") - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given
class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be
aware of metaclasses. Here we use this to register each subclass
in a dict that has the `is_datasource_for` attribute. This is
then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
# kwargs are not used here but are passed in for sources.
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# TODO: Fix this?
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""
A list of all the names of the columns in the data.
"""
return list(self.data.columns.values)
@property
def index(self):
"""
The time index of the data.
"""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`.
"""
if len(self.data) > 0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want to output.
Returns
-------
`~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new `~sunpy.timeseries.TimeSeries` with the given column added
or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional
Defaults to `True`, allowing the method to overwrite a column already present in the `~sunpy.timeseries.TimeSeries`.
Returns
-------
`sunpy.timeseries.TimeSeries`
A new `~sunpy.timeseries.TimeSeries`.
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
        # Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""
Returns a sorted version of a `~sunpy.timeseries.TimeSeries`. Generally
this shouldn't be necessary as most `~sunpy.timeseries.TimeSeries`
operations sort the data anyway to ensure consistent behavior when
truncating.
Returns
-------
`~sunpy.timeseries.TimeSeries`
A new `~sunpy.timeseries.TimeSeries` in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""
Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str`, `int`
            Either a time range to truncate to, or a start time in some format recognized by pandas, or an index integer.
        b : `str` or `int`, optional
            If specified, the end time of the time range in some format recognized by pandas, or an index integer.
Defaults to `None`.
int : `int`, optional
If specified, the integer indicating the slicing intervals.
Defaults to `None`.
Returns
-------
`~sunpy.timeseries.TimeSeries`
A new `~sunpy.timeseries.TimeSeries` with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
        # If an interval integer was given then use it in truncation.
        truncated_data = self.data.sort_index()[start:end:int]
        # Truncate the metadata
        # Check that there is still data present
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
        # Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""
Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
`~sunpy.timeseries.TimeSeries`
A new `~sunpy.timeseries.TimeSeries` with only the selected column.
"""
# TODO: allow the extract function to pick more than one column
# TODO: Fix this?
# if isinstance(self, pandas.Series):
# return self
# else:
# return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
        # Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, same_source=False, **kwargs):
"""
Concatenate with another `~sunpy.timeseries.TimeSeries`. This function
will check and remove any duplicate times. It will keep the column
values from the original timeseries to which the new time series is
being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another `~sunpy.timeseries.TimeSeries`.
same_source : `bool`, optional
Set to `True` to check if the sources of the time series match. Defaults to `False`.
Returns
-------
`~sunpy.timeseries.TimeSeries`
A new `~sunpy.timeseries.TimeSeries`.
Notes
-----
Extra keywords are passed to `pandas.concat`.
"""
# TODO: decide if we want to be able to concatenate multiple time series at once.
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
kwargs['sort'] = kwargs.pop('sort', False)
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
        # Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""
        Plot the `~sunpy.timeseries.TimeSeries`.
Parameters
----------
axes : `~matplotlib.axes.Axes`, optional
If provided the image will be plotted on the given axes.
Defaults to `None`, so the current axes will be used.
**plot_args : `dict`, optional
Any additional plot arguments that should be used when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
@peek_show
def peek(self, **kwargs):
"""
Displays a graphical overview of the data in this object for user evaluation.
For the creation of plots, users should instead use the
`~sunpy.timeseries.GenericTimeSeries.plot` method and Matplotlib's pyplot framework.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
return figure
def _validate_data_for_ploting(self):
"""
Raises an exception if the `~sunpy.timeseries.TimeSeries` is invalid
for plotting.
This should be added into all `~sunpy.timeseries.TimeSeries`
peek methods.
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a
`~sunpy.timeseries.TimeSeries`.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-
specific validation should be handled in the relevant file in
the "sunpy.timeseries.sources".
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a
`~sunpy.timeseries.TimeSeries`.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-
specific validation should be handled in the relevant file in
the "sunpy.timeseries.sources".
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitizes the `collections.OrderedDict` used to store the units.
Primarily this method will:
* Remove entries that don't match up to a column.
* Add unitless entries for columns with no units defined.
* Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column: self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitizes the `~sunpy.timeseries.TimeSeriesMetaData` used to store the
metadata.
Primarily this method will:
* Remove entries outside of the dates or truncate if the metadata overflows past the data.
* Remove column references in the metadata that don't match to a column in the data.
* Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an `astropy.table.Table` of the given
`~sunpy.timeseries.TimeSeries`.
Returns
-------
`~astropy.table.Table`
A new `astropy.table.Table` containing the data from the `~sunpy.timeseries.TimeSeries`.
The table will include units where relevant.
"""
# TODO: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a `~pandas.core.frame.DataFrame` of the given
`~sunpy.timeseries.TimeSeries`.
Returns
-------
`~pandas.core.frame.DataFrame`
A `~pandas.core.frame.DataFrame` containing the data.
"""
return self.data
def to_array(self, **kwargs):
"""
Return a `numpy.array` of the given `~sunpy.timeseries.TimeSeries`.
Parameters
----------
kwargs : `dict`
All keyword arguments are passed to `pandas.DataFrame.to_numpy`.
Returns
-------
`~numpy.ndarray`
If the data is heterogeneous and contains booleans or objects, the result will be of ``dtype=object``.
"""
if hasattr(self.data, "to_numpy"):
return self.data.to_numpy(**kwargs)
else:
return self.data.values
def __eq__(self, other):
"""
        Check whether two `~sunpy.timeseries.TimeSeries` are the same: they must have
        matching type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.TimeSeries`
The second `~sunpy.timeseries.TimeSeries` to compare with.
Returns
-------
`bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
        Check whether two `~sunpy.timeseries.TimeSeries` are not the same: they don't
        have matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.TimeSeries`
The second `~sunpy.timeseries.TimeSeries` to compare with.
Returns
-------
`bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""
Parses a file - to be implemented in any subclass that may use files.
Parameters
----------
filepath : `str`
The path to the file you want to parse.
"""
return NotImplemented
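# Editor's sketch (not part of the sunpy API): a minimal end-to-end use of the
# class above, exercising quantity(), add_column() and truncate(). The column
# name, unit and date range are arbitrary assumptions; real data would normally
# come from the TimeSeries factory instead.
def _example_generic_timeseries_usage():
    import numpy as np
    times = pd.date_range("2020-01-01", periods=60, freq="min")
    data = pd.DataFrame({"intensity": np.sin(np.linspace(0, 2 * np.pi, 60))},
                        index=times)
    ts = GenericTimeSeries(data, units={"intensity": u.W / u.m ** 2})
    flux = ts.quantity("intensity")                     # astropy Quantity in W / m2
    ts2 = ts.add_column("double_intensity", flux * 2)   # returns a new TimeSeries
    return ts2.truncate("2020-01-01 00:10", "2020-01-01 00:30")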
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Dirichlet",
]
_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with
dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e.,
`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with
`self.batch_shape() + self.event_shape()`."""
@tf_export("distributions.Dirichlet")
class Dirichlet(distribution.Distribution):
"""Dirichlet distribution.
The Dirichlet distribution is defined over the
[`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive,
length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the
Beta distribution when `k = 2`.
#### Mathematical Details
The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e.,
```none
S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }.
```
The probability density function (pdf) is,
```none
pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z
Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j)
```
where:
* `x in S^{k-1}`, i.e., the `(k-1)`-simplex,
* `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`,
* `Z` is the normalization constant aka the [multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The `concentration` represents mean total counts of class occurrence, i.e.,
```none
concentration = alpha = mean * total_concentration
```
where `mean` in `S^{k-1}` and `total_concentration` is a positive real number
representing a mean total count.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: Some components of the samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
# Create a single trivariate Dirichlet, with the 3rd class being three times
# more frequent than the first. I.e., batch_shape=[], event_shape=[3].
alpha = [1., 2, 3]
dist = tf.distributions.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 3]
# x has one sample, one batch, three classes:
x = [.2, .3, .5] # shape: [3]
dist.prob(x) # shape: []
# x has two samples from one batch:
x = [[.1, .4, .5],
[.2, .3, .5]]
dist.prob(x) # shape: [2]
# alpha will be broadcast to shape [5, 7, 3] to match x.
x = [[...]] # shape: [5, 7, 3]
dist.prob(x) # shape: [5, 7]
```
```python
# Create batch_shape=[2], event_shape=[3]:
alpha = [[1., 2, 3],
[4, 5, 6]] # shape: [2, 3]
dist = tf.distributions.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # shape: [2]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant([1.0, 2.0, 3.0])
dist = tf.distributions.Dirichlet(alpha)
samples = dist.sample(5) # Shape [5, 3]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, alpha)
```
"""
def __init__(self,
concentration,
validate_args=False,
allow_nan_stats=True,
name="Dirichlet"):
"""Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration]) as name:
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration, name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(Dirichlet, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._total_concentration],
name=name)
@property
def concentration(self):
"""Concentration parameter; expected counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
gamma_sample = random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)
return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keepdims=True)
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.reduce_sum((self.concentration - 1.) * math_ops.log(x), -1)
def _log_normalization(self):
return special_math_ops.lbeta(self.concentration)
def _entropy(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
return (
self._log_normalization()
+ ((self.total_concentration - k)
* math_ops.digamma(self.total_concentration))
- math_ops.reduce_sum(
(self.concentration - 1.) * math_ops.digamma(self.concentration),
axis=-1))
def _mean(self):
return self.concentration / self.total_concentration[..., array_ops.newaxis]
def _covariance(self):
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when any `concentration <= 1`. If
`self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If
`self.allow_nan_stats` is `False` an exception is raised when one or more
modes are undefined.""")
def _mode(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
mode = (self.concentration - 1.) / (
self.total_concentration[..., array_ops.newaxis] - k)
if self.allow_nan_stats:
nan = array_ops.fill(
array_ops.shape(mode),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(
math_ops.reduce_all(self.concentration > 1., axis=-1),
mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="Mode undefined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
check_ops.assert_rank_at_least(
concentration, 1,
message="Concentration parameter must have >=1 dimensions."),
check_ops.assert_less(
1, array_ops.shape(concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x, message="samples must be positive"),
check_ops.assert_near(
array_ops.ones([], dtype=self.dtype),
math_ops.reduce_sum(x, -1),
message="sample last-dimension must sum to `1`"),
], x)
@kullback_leibler.RegisterKL(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(d1, d2, name=None):
"""Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: (optional) Name to use for created operations.
default is "kl_dirichlet_dirichlet".
Returns:
Batchwise KL(d1 || d2)
"""
with ops.name_scope(name, "kl_dirichlet_dirichlet", values=[
d1.concentration, d2.concentration]):
# The KL between Dirichlet distributions can be derived as follows. We have
#
# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
#
# where B(a) is the multivariate Beta function:
#
# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
#
# The KL is
#
# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
#
# so we'll need to know the log density of the Dirichlet. This is
#
# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
#
# The only term that matters for the expectations is the log(x[i]). To
# compute the expectation of this term over the Dirichlet density, we can
# use the following facts about the Dirichlet in exponential family form:
# 1. log(x[i]) is a sufficient statistic
# 2. expected sufficient statistics (of any exp family distribution) are
# equal to derivatives of the log normalizer with respect to
# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
#
# To proceed, we can rewrite the Dirichlet density in exponential family
# form as follows:
#
# Dir(x; a) = exp{eta(a) . T(x) - A(a)}
#
# where '.' is the dot product of vectors eta and T, and A is a scalar:
#
# eta[i](a) = a[i] - 1
# T[i](x) = log(x[i])
# A(a) = log B(a)
#
# Now, we can use fact (2) above to write
#
# E_Dir(x; a)[log(x[i])]
# = dA(a) / da[i]
# = d/da[i] log B(a)
    # = d/da[i] (sum_j lgamma(a[j]) - lgamma(sum_j a[j]))
    # = digamma(a[i]) - digamma(sum_j a[j])
#
# Putting it all together, we have
#
# KL[Dir(x; a) || Dir(x; b)]
    # = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
    # = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])]} - (lbeta(a) - lbeta(b))
    # = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
    # = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
    # - lbeta(a) + lbeta(b)
digamma_sum_d1 = math_ops.digamma(
math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))
digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1
concentration_diff = d1.concentration - d2.concentration
return (math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) -
special_math_ops.lbeta(d1.concentration) +
special_math_ops.lbeta(d2.concentration))
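# Editor's note: a NumPy/SciPy cross-check of the closed form derived in the
# comment block above. It is an illustrative sketch only (it assumes SciPy is
# importable) and is not part of the TensorFlow API.
def _kl_dirichlet_numpy_sketch(a, b):
  """Closed-form KL(Dir(a) || Dir(b)) for 1-D concentration vectors."""
  from scipy.special import digamma, gammaln
  a = np.asarray(a, dtype=np.float64)
  b = np.asarray(b, dtype=np.float64)
  def lbeta(c):
    return np.sum(gammaln(c)) - gammaln(np.sum(c))
  digamma_diff = digamma(a) - digamma(np.sum(a))
  return np.sum((a - b) * digamma_diff) - lbeta(a) + lbeta(b)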
|
|
"""
Module :mod:`pyesgf.logon`
==========================
Manage the client's interaction with ESGF's security system. Using this
module requires installing the MyProxyClient_ library.
.. _MyProxyClient: http://pypi.python.org/pypi/MyProxyClient
To obtain ESGF credentials create a :class:`LogonManager` instance and supply
it with logon details::
>>> lm = LogonManager()
>>> lm.is_logged_on()
False
>>> lm.logon(username, password, myproxy_hostname)
>>> lm.is_logged_on()
True
Logon parameters that aren't specified will be prompted for at the terminal
by default. The :class:`LogonManager` object also writes a ``.dodsrc`` file
configuring OPeNDAP access through the NetCDF API.
You can use your OpenID to logon instead. The logon details will be deduced
from the OpenID where possible::
>>> lm.logoff()
>>> lm.is_logged_on()
False
>>> lm.logon_with_openid(openid, password)
>>> lm.is_logged_on()
True
"""
import os
import os.path as op
import shutil
from xml.etree import ElementTree
import requests
from six.moves import input
import re
from getpass import getpass
try:
from myproxy.client import MyProxyClient
import OpenSSL
_has_myproxy = True
except (ImportError, SyntaxError):
_has_myproxy = False
from .exceptions import OpenidResolutionError
# -----------------------------------------------------------------------------
# Constants
ESGF_DIR = op.join(os.environ['HOME'], '.esg')
ESGF_CERTS_DIR = 'certificates'
ESGF_CREDENTIALS = 'credentials.pem'
DAP_CONFIG = op.join(os.environ['HOME'], '.dodsrc')
DAP_CONFIG_MARKER = '<<< Managed by esgf-pyclient >>>'
XRI_NS = 'xri://$xrd*($v*2.0)'
MYPROXY_URN = 'urn:esg:security:myproxy-service'
ESGF_OPENID_REXP = r'https://.*/esgf-idp/openid/(.*)'
MYPROXY_URI_REXP = r'socket://([^:]*):?(\d+)?'
# -----------------------------------------------------------------------------
# classes
class LogonManager(object):
"""
    Manages ESGF credentials and security configuration files.
Also integrates with NetCDF's secure OPeNDAP configuration.
"""
STATE_LOGGED_ON = 0
STATE_NO_CREDENTIALS = 1
STATE_EXPIRED_CREDENTIALS = 2
STATE_INVALID_CREDENTIALS = 3
def __init__(self, esgf_dir=ESGF_DIR, dap_config=DAP_CONFIG,
verify=True):
"""
:param esgf_dir: Root directory of ESGF state. Default ~/.esg
        :param dap_config: Set the location of .dodsrc. Defaults to ~/.dodsrc
        :param verify: SSL verification option. Default ``True``.
                       See the ``requests`` documentation to configure the
                       ``verify`` option:
                       http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
        Note if dap_config is defined your current working directory must be
        the same as the location of the dap_config file when OPeNDAP is
        initialised.
"""
if not _has_myproxy:
raise ImportError('pyesgf.logon requires MyProxyClient')
self.esgf_dir = esgf_dir
self.esgf_credentials = op.join(self.esgf_dir, ESGF_CREDENTIALS)
self.esgf_certs_dir = op.join(self.esgf_dir, ESGF_CERTS_DIR)
self.dap_config = dap_config
self.verify = verify
self._write_dap_config()
@property
def state(self):
if not op.exists(self.esgf_credentials):
return self.STATE_NO_CREDENTIALS
else:
with open(self.esgf_credentials) as fh:
data = fh.read()
cert = OpenSSL.crypto.load_certificate(
OpenSSL.SSL.FILETYPE_PEM, data)
if cert.has_expired():
return self.STATE_EXPIRED_CREDENTIALS
# !TODO: check credentials against certificates
return self.STATE_LOGGED_ON
def is_logged_on(self):
return self.state == self.STATE_LOGGED_ON
def logon_with_openid(self, openid, password=None,
bootstrap=False, update_trustroots=True,
interactive=True):
"""
Obtains ESGF credentials by detecting the MyProxy parameters from
        the user's OpenID. Some ESGF compatible OpenIDs do not contain enough
information to obtain credentials. In this case the user is prompted
for missing information if ``interactive == True``, otherwise an
exception is raised.
        :param openid: OpenID to login with. See :meth:`logon` for the parameters
            ``interactive``, ``bootstrap`` and ``update_trustroots``.
"""
username, myproxy = self._get_logon_details(openid)
return self.logon(username, password, myproxy,
bootstrap=bootstrap,
update_trustroots=update_trustroots,
interactive=interactive)
def logon(self, username=None, password=None, hostname=None,
bootstrap=False, update_trustroots=True,
interactive=True):
"""
Obtain ESGF credentials from the specified MyProxy service.
If ``interactive == True`` then any missing parameters of ``password``,
``username`` or ``hostname`` will be prompted for at the terminal.
:param interactive: Whether to ask for input at the terminal for
any missing information. I.e. username, password or hostname.
:param bootstrap: Whether to bootstrap the trustroots for this
MyProxy service.
:param update_trustroots: Whether to update the trustroots for this
MyProxy service.
"""
if interactive:
if hostname is None:
                print('Enter myproxy hostname:')
hostname = input()
if username is None:
                print('Enter myproxy username:')
username = input()
if password is None:
password = getpass('Enter password for %s: ' % username)
if None in (hostname, username, password):
raise OpenidResolutionError('Full logon details not available')
c = MyProxyClient(hostname=hostname, caCertDir=self.esgf_certs_dir)
creds = c.logon(username, password,
bootstrap=bootstrap,
updateTrustRoots=update_trustroots)
with open(self.esgf_credentials, 'w') as fh:
for cred in creds:
fh.write(cred)
def logoff(self, clear_trustroots=False):
"""
Remove any obtained credentials from the ESGF environment.
:param clear_trustroots: If True also remove trustroots.
"""
if op.exists(self.esgf_credentials):
os.remove(self.esgf_credentials)
if clear_trustroots:
shutil.rmtree(self.esgf_certs_dir)
def _get_logon_details(self, openid):
response = requests.get(openid, verify=self.verify)
xml = ElementTree.fromstring(response.content)
hostname = None
port = None
username = None
services = xml.findall('.//{%s}Service' % XRI_NS)
for service in services:
try:
service_type = service.find('{%s}Type' % XRI_NS).text
except AttributeError:
continue
# Detect myproxy hostname and port
if service_type == MYPROXY_URN:
myproxy_uri = service.find('{%s}URI' % XRI_NS).text
mo = re.match(MYPROXY_URI_REXP, myproxy_uri)
if mo:
hostname, port = mo.groups()
# If the OpenID matches the standard ESGF pattern assume it contains
# the username, otherwise prompt or raise an exception
mo = re.match(ESGF_OPENID_REXP, openid)
if mo:
username = mo.group(1)
# !TODO maybe support different myproxy port
if port is not None:
assert int(port) == 7512
return username, hostname
def _write_dap_config(self, verbose=False, validate=False):
preamble, managed, postamble = self._parse_dap_config()
with open(self.dap_config, 'w') as fh:
fh.write(('{preamble}\n'
'# BEGIN {marker}\n'
'HTTP.VERBOSE={verbose}\n'
'HTTP.COOKIEJAR={esgf_dir}/.dods_cookies\n'
'HTTP.SSL.VALIDATE=0\n'
'HTTP.SSL.CERTIFICATE={esgf_dir}/credentials.pem\n'
'HTTP.SSL.KEY={esgf_dir}/credentials.pem\n'
'HTTP.SSL.CAPATH={esgf_certs_dir}\n'
'# END {marker}\n'
'{postamble}\n')
.format(verbose=1 if verbose else 0,
validate=1 if validate else 0,
esgf_certs_dir=self.esgf_certs_dir,
esgf_dir=self.esgf_dir,
marker=DAP_CONFIG_MARKER,
preamble=preamble,
postamble=postamble))
def _parse_dap_config(self, config_str=None):
"""
Read the DAP_CONFIG file and extract the parts not controlled
by esgf-pyclient.
:return: (preamble, managed, postamble), three strings of
configuration lines before, within and after the esgf-pyclient
controlled block.
"""
if config_str is None:
if not op.exists(self.dap_config):
return ('', '', '')
config_str = open(self.dap_config).read()
        # !NOTE: the third positional argument of re.split() is maxsplit,
        # not flags, so re.M must be passed as a keyword argument for the
        # ^...$ anchors to match the marker lines.
        sections = re.split(r'^# (?:BEGIN|END) {0}$\n'
                            .format(DAP_CONFIG_MARKER),
                            config_str, flags=re.M)
if len(sections) < 2:
preamble, managed, postamble = sections[0], '', ''
elif len(sections) == 2:
preamble, managed, postamble = sections + ['']
elif len(sections) == 3:
preamble, managed, postamble = sections
else:
# In odd circumstances there might be more than 3 parts of the
# config so assume the final config is the one to keep
managed, unmanaged = [], []
sections.reverse()
while sections:
unmanaged.append(sections.pop())
if sections:
managed.append(sections.pop())
preamble = '\n'.join(unmanaged[:-1])
postamble = unmanaged[-1]
managed = managed[-1]
return preamble.strip(), managed.strip(), postamble.strip()
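# Editor's sketch (not part of pyesgf): shows how _parse_dap_config() splits an
# existing config file into the text before, inside and after the managed
# block. The sample configuration lines are invented for illustration.
def _example_parse_dap_config():
    sample = ('HTTP.TIMEOUT=30\n'
              '# BEGIN {marker}\n'
              'HTTP.SSL.VALIDATE=0\n'
              '# END {marker}\n'
              'HTTP.VERBOSE=0\n').format(marker=DAP_CONFIG_MARKER)
    sections = re.split(r'^# (?:BEGIN|END) {0}$\n'.format(DAP_CONFIG_MARKER),
                        sample, flags=re.M)
    # sections == ['HTTP.TIMEOUT=30\n', 'HTTP.SSL.VALIDATE=0\n', 'HTTP.VERBOSE=0\n']
    return [section.strip() for section in sections]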
|
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://matthewdhoffman.com/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
import warnings
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.fixes import logsumexp
from ..utils.validation import check_non_negative
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics or not.
        Set `cal_sstats` to `True` when we need to run the M-step.
random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic distribution.
        Setting `random_state` to None will initialize the document topic
        distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
    # Expected sufficient statistics for the M-step (only allocated when
    # `cal_sstats` is True).
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_components : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_components`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_components`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
        Method used to update ``components_``. Only used in the `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
The default learning method is going to be changed to 'batch' in the
0.20 release.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
        It is a parameter that controls the learning rate in the online
        learning method. The value should be set between (0.5, 1.0] to
        guarantee asymptotic convergence. When the value is 0.0 and
        batch_size is ``n_samples``, the update method is the same as batch
        learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
    evaluate_every : int, optional (default=-1)
        How often to evaluate perplexity. Only used in the `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_topics : int, optional (default=None)
This parameter has been renamed to n_components and will
be removed in version 0.21.
.. deprecated:: 0.19
Attributes
----------
components_ : array, [n_components, n_features]
Variational parameters for topic word distribution. Since the complete
conditional for topic word distribution is a Dirichlet,
``components_[i, j]`` can be viewed as pseudocount that represents the
number of times word `j` was assigned to topic `i`.
It can also be viewed as distribution over the words for each topic
after normalization:
``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://matthewdhoffman.com//code/onlineldavb.tar
"""
def __init__(self, n_components=10, doc_topic_prior=None,
topic_word_prior=None, learning_method=None,
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None, n_topics=None):
self.n_components = n_components
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
self.n_topics = n_topics
def _check_params(self):
"""Check model parameters."""
if self.n_topics is not None:
self._n_components = self.n_topics
warnings.warn("n_topics has been renamed to n_components in "
"version 0.19 and will be removed in 0.21",
DeprecationWarning)
else:
self._n_components = self.n_components
if self._n_components <= 0:
raise ValueError("Invalid 'n_components' parameter: %r"
% self._n_components)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online", None):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self._n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self._n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self._n_components, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
            Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : boolean
            Parameter that indicates whether to initialize document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0,
self.verbose - 1))
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
        update ``components_`` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_components)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
        # update `components_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
        check X format and make sure there are no negative values in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
y : Ignored
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0,
self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
y : Ignored
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
if learning_method is None:
warnings.warn("The default value for 'learning_method' will be "
"changed from 'online' to 'batch' in the release "
"0.20. This warning was introduced in 0.18.",
DeprecationWarning)
learning_method = 'online'
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0,
self.verbose - 1)) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self._perplexity_precomp_distr(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d of max_iter: %d, perplexity: %.4f'
% (i + 1, max_iter, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
elif self.verbose:
print('iteration: %d of max_iter: %d' % (i + 1, max_iter))
self.n_iter_ += 1
# calculate final perplexity value on train set
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
self.bound_ = self._perplexity_precomp_distr(X, doc_topics_distr,
sub_sampling=False)
return self
def _unnormalized_transform(self, X):
"""Transform data X according to fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_components)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
*doc_topic_distr* is now normalized
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_components)
Document topic distribution for X.
"""
doc_topic_distr = self._unnormalized_transform(X)
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_components)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
            It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_components = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self._n_components)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
y : Ignored
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def _perplexity_precomp_distr(self, X, doc_topic_distr=None,
sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_components)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self._unnormalized_transform(X)
else:
n_samples, n_components = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_components != self._n_components:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
def perplexity(self, X, doc_topic_distr='deprecated', sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
            because the user no longer has access to the unnormalized
            distribution
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_components)
Document topic distribution.
This argument is deprecated and is currently being ignored.
.. deprecated:: 0.19
sub_sampling : bool
Do sub-sampling or not.
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr != 'deprecated':
warnings.warn("Argument 'doc_topic_distr' is deprecated and is "
"being ignored as of 0.19. Support for this "
"argument will be removed in 0.21.",
DeprecationWarning)
return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
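    # A minimal usage sketch (illustrative only, not part of the estimator):
    # fit the model on a tiny document-word count matrix and inspect the
    # normalized document-topic distribution returned by transform(). The
    # counts below are made up for demonstration.
    #
    #   >>> import numpy as np
    #   >>> X = np.array([[2, 1, 0, 0],
    #   ...               [3, 0, 1, 0],
    #   ...               [0, 0, 2, 3],
    #   ...               [0, 1, 1, 4]])
    #   >>> lda = LatentDirichletAllocation(n_components=2,
    #   ...                                 learning_method='batch',
    #   ...                                 random_state=0)
    #   >>> doc_topics = lda.fit(X).transform(X)
    #   >>> doc_topics.shape
    #   (4, 2)
    #   >>> bool(np.allclose(doc_topics.sum(axis=1), 1.0))
    #   True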
|
|
# coding=utf-8
"""
Download mode implementation.
"""
from __future__ import division
import os
import re
import sys
import mimetypes
import threading
from time import sleep, time
from .output import RawStream
from .models import HTTPResponse
from .utils import humanize_bytes
from .compat import urlsplit
PARTIAL_CONTENT = 206
CLEAR_LINE = '\r\033[K'
PROGRESS = (
'{percentage: 6.2f} %'
' {downloaded: >10}'
' {speed: >10}/s'
' {eta: >8} ETA'
)
PROGRESS_NO_CONTENT_LENGTH = '{downloaded: >10} {speed: >10}/s'
SUMMARY = 'Done. {downloaded} in {time:0.5f}s ({speed}/s)\n'
SPINNER = '|/-\\'
class ContentRangeError(ValueError):
pass
def parse_content_range(content_range, resumed_from):
"""
Parse and validate Content-Range header.
<http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html>
:param content_range: the value of a Content-Range response header
eg. "bytes 21010-47021/47022"
:param resumed_from: first byte pos. from the Range request header
:return: total size of the response body when fully downloaded.
"""
if content_range is None:
raise ContentRangeError('Missing Content-Range')
pattern = (
'^bytes (?P<first_byte_pos>\d+)-(?P<last_byte_pos>\d+)'
'/(\*|(?P<instance_length>\d+))$'
)
match = re.match(pattern, content_range)
if not match:
raise ContentRangeError(
'Invalid Content-Range format %r' % content_range)
content_range_dict = match.groupdict()
first_byte_pos = int(content_range_dict['first_byte_pos'])
last_byte_pos = int(content_range_dict['last_byte_pos'])
instance_length = (
int(content_range_dict['instance_length'])
if content_range_dict['instance_length']
else None
)
# "A byte-content-range-spec with a byte-range-resp-spec whose
# last- byte-pos value is less than its first-byte-pos value,
# or whose instance-length value is less than or equal to its
# last-byte-pos value, is invalid. The recipient of an invalid
# byte-content-range- spec MUST ignore it and any content
# transferred along with it."
if (first_byte_pos >= last_byte_pos
or (instance_length is not None
and instance_length <= last_byte_pos)):
raise ContentRangeError(
'Invalid Content-Range returned: %r' % content_range)
if (first_byte_pos != resumed_from
or (instance_length is not None
and last_byte_pos + 1 != instance_length)):
# Not what we asked for.
raise ContentRangeError(
'Unexpected Content-Range returned (%r)'
' for the requested Range ("bytes=%d-")'
% (content_range, resumed_from)
)
return last_byte_pos + 1
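# A quick illustration (values taken from the docstring example above): a
# valid Content-Range that matches the resumed offset yields the total body
# size.
#
#   >>> parse_content_range('bytes 21010-47021/47022', resumed_from=21010)
#   47022
#
# A range that does not start at ``resumed_from`` (or is otherwise invalid)
# raises ContentRangeError instead.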
def filename_from_content_disposition(content_disposition):
"""
Extract and validate filename from a Content-Disposition header.
:param content_disposition: Content-Disposition value
:return: the filename if present and valid, otherwise `None`
"""
# attachment; filename=jkbr-httpie-0.4.1-20-g40bd8f6.tar.gz
match = re.search('filename=(\S+)', content_disposition)
if match and match.group(1):
fn = match.group(1).strip('."')
if re.match('^[a-zA-Z0-9._-]+$', fn):
return fn
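# Example behaviour (illustrative only): a simple token filename passes the
# whitelist check, while anything with path separators or other unexpected
# characters is rejected and ``None`` is returned.
#
#   >>> filename_from_content_disposition(
#   ...     'attachment; filename=jkbr-httpie-0.4.1-20-g40bd8f6.tar.gz')
#   'jkbr-httpie-0.4.1-20-g40bd8f6.tar.gz'
#   >>> filename_from_content_disposition(
#   ...     'attachment; filename="/etc/passwd"') is None
#   True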
def filename_from_url(url, content_type):
fn = urlsplit(url).path.rstrip('/')
fn = os.path.basename(fn) if fn else 'index'
if '.' not in fn and content_type:
content_type = content_type.split(';')[0]
if content_type == 'text/plain':
# mimetypes returns '.ksh'
ext = '.txt'
else:
ext = mimetypes.guess_extension(content_type)
if ext == '.htm': # Python 3
ext = '.html'
if ext:
fn += ext
return fn
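# Example behaviour (illustrative only): the URL basename is used when
# present, otherwise 'index', and an extension is added from the
# Content-Type only when the name has none.
#
#   >>> filename_from_url('http://example.org/files/archive.tar.gz', None)
#   'archive.tar.gz'
#   >>> filename_from_url('http://example.org/', 'text/plain')
#   'index.txt'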
def get_unique_filename(fn, exists=os.path.exists):
attempt = 0
while True:
suffix = '-' + str(attempt) if attempt > 0 else ''
if not exists(fn + suffix):
return fn + suffix
attempt += 1
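# Example behaviour (illustrative only, using an injected ``exists`` callable
# instead of touching the filesystem): the first free '-N' suffix is picked.
#
#   >>> taken = {'report.txt', 'report.txt-1'}
#   >>> get_unique_filename('report.txt', exists=lambda fn: fn in taken)
#   'report.txt-2'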
class Download(object):
def __init__(self, output_file=None,
resume=False, progress_file=sys.stderr):
"""
        :param resume: Whether the download should resume if a partial
            download already exists.
:type resume: bool
:param output_file: The file to store response body in. If not
provided, it will be guessed from the response.
:type output_file: file
:param progress_file: Where to report download progress.
:type progress_file: file
"""
self._output_file = output_file
self._resume = resume
self._resumed_from = 0
self.finished = False
self.status = Status()
self._progress_reporter = ProgressReporterThread(
status=self.status,
output=progress_file
)
def pre_request(self, request_headers):
"""Called just before the HTTP request is sent.
Might alter `request_headers`.
:type request_headers: dict
"""
# Disable content encoding so that we can resume, etc.
request_headers['Accept-Encoding'] = None
if self._resume:
bytes_have = os.path.getsize(self._output_file.name)
if bytes_have:
# Set ``Range`` header to resume the download
# TODO: Use "If-Range: mtime" to make sure it's fresh?
request_headers['Range'] = 'bytes=%d-' % bytes_have
self._resumed_from = bytes_have
def start(self, response):
"""
Initiate and return a stream for `response` body with progress
callback attached. Can be called only once.
:param response: Initiated response object with headers already fetched
:type response: requests.models.Response
:return: RawStream, output_file
"""
assert not self.status.time_started
try:
total_size = int(response.headers['Content-Length'])
except (KeyError, ValueError, TypeError):
total_size = None
if self._output_file:
if self._resume and response.status_code == PARTIAL_CONTENT:
total_size = parse_content_range(
response.headers.get('Content-Range'),
self._resumed_from
)
else:
self._resumed_from = 0
try:
self._output_file.seek(0)
self._output_file.truncate()
except IOError:
pass # stdout
else:
# TODO: Should the filename be taken from response.history[0].url?
# Output file not specified. Pick a name that doesn't exist yet.
fn = None
if 'Content-Disposition' in response.headers:
fn = filename_from_content_disposition(
response.headers['Content-Disposition'])
if not fn:
fn = filename_from_url(
url=response.url,
content_type=response.headers.get('Content-Type'),
)
self._output_file = open(get_unique_filename(fn), mode='a+b')
self.status.started(
resumed_from=self._resumed_from,
total_size=total_size
)
stream = RawStream(
msg=HTTPResponse(response),
with_headers=False,
with_body=True,
on_body_chunk_downloaded=self._chunk_downloaded,
chunk_size=1024 * 8
)
self._progress_reporter.output.write(
'Downloading %sto "%s"\n' % (
(humanize_bytes(total_size) + ' '
if total_size is not None
else ''),
self._output_file.name
)
)
self._progress_reporter.start()
return stream, self._output_file
def finish(self):
assert not self.finished
self.finished = True
self.status.finished()
def failed(self):
self._progress_reporter.stop()
@property
def interrupted(self):
return (
self.finished
and self.status.total_size
and self.status.total_size != self.status.downloaded
)
def _chunk_downloaded(self, chunk):
"""
A download progress callback.
:param chunk: A chunk of response body data that has just
been downloaded and written to the output.
:type chunk: bytes
"""
self.status.chunk_downloaded(len(chunk))
class Status(object):
"""Holds details about the downland status."""
def __init__(self):
self.downloaded = 0
self.total_size = None
self.resumed_from = 0
self.time_started = None
self.time_finished = None
def started(self, resumed_from=0, total_size=None):
assert self.time_started is None
if total_size is not None:
self.total_size = total_size
self.downloaded = self.resumed_from = resumed_from
self.time_started = time()
def chunk_downloaded(self, size):
assert self.time_finished is None
self.downloaded += size
@property
def has_finished(self):
return self.time_finished is not None
def finished(self):
assert self.time_started is not None
assert self.time_finished is None
self.time_finished = time()
class ProgressReporterThread(threading.Thread):
"""
    Reports download progress based on its status.
    Runs in a separate thread and periodically reports speed, ETA, etc.
"""
def __init__(self, status, output, tick=.1, update_interval=1):
"""
:type status: Status
:type output: file
"""
super(ProgressReporterThread, self).__init__()
self.status = status
self.output = output
self._tick = tick
self._update_interval = update_interval
self._spinner_pos = 0
self._status_line = ''
self._prev_bytes = 0
self._prev_time = time()
self._should_stop = threading.Event()
def stop(self):
"""Stop reporting on next tick."""
self._should_stop.set()
def run(self):
while not self._should_stop.is_set():
if self.status.has_finished:
self.sum_up()
break
self.report_speed()
sleep(self._tick)
def report_speed(self):
now = time()
if now - self._prev_time >= self._update_interval:
downloaded = self.status.downloaded
try:
speed = ((downloaded - self._prev_bytes)
/ (now - self._prev_time))
except ZeroDivisionError:
speed = 0
if not self.status.total_size:
self._status_line = PROGRESS_NO_CONTENT_LENGTH.format(
downloaded=humanize_bytes(downloaded),
speed=humanize_bytes(speed),
)
else:
try:
percentage = downloaded / self.status.total_size * 100
except ZeroDivisionError:
percentage = 0
if not speed:
eta = '-:--:--'
else:
s = int((self.status.total_size - downloaded) / speed)
h, s = divmod(s, 60 * 60)
m, s = divmod(s, 60)
eta = '{0}:{1:0>2}:{2:0>2}'.format(h, m, s)
self._status_line = PROGRESS.format(
percentage=percentage,
downloaded=humanize_bytes(downloaded),
speed=humanize_bytes(speed),
eta=eta,
)
self._prev_time = now
self._prev_bytes = downloaded
self.output.write(
CLEAR_LINE
+ ' '
+ SPINNER[self._spinner_pos]
+ ' '
+ self._status_line
)
self.output.flush()
self._spinner_pos = (self._spinner_pos + 1
if self._spinner_pos + 1 != len(SPINNER)
else 0)
def sum_up(self):
actually_downloaded = (self.status.downloaded
- self.status.resumed_from)
time_taken = self.status.time_finished - self.status.time_started
self.output.write(CLEAR_LINE)
self.output.write(SUMMARY.format(
downloaded=humanize_bytes(actually_downloaded),
total=(self.status.total_size
and humanize_bytes(self.status.total_size)),
speed=humanize_bytes(actually_downloaded / time_taken),
time=time_taken,
))
self.output.flush()
|
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import urllib
from itertools import ifilter
from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import http
def encode_missing(object_hash, ts_data, ts_meta=None):
"""
Returns a string representing the object hash, its data file timestamp
and the delta forwards to its metafile timestamp, if non-zero, in the form:
``<hash> <timestamp> m:<hex delta>``
The decoder for this line is
:py:func:`~swift.obj.ssync_receiver.decode_missing`
"""
msg = ('%s %s'
% (urllib.parse.quote(object_hash),
urllib.parse.quote(ts_data.internal)))
if ts_meta and ts_meta != ts_data:
delta = ts_meta.raw - ts_data.raw
msg = '%s m:%x' % (msg, delta)
return msg
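# A small sketch of the wire format (illustrative only).  ``FakeTS`` below is
# a hypothetical stand-in exposing just the two attributes this function
# reads (``internal`` and ``raw``); real callers pass swift Timestamp objects.
#
#   >>> from collections import namedtuple
#   >>> FakeTS = namedtuple('FakeTS', 'internal raw')
#   >>> ts_data = FakeTS('1400000000.00000', 140000000000000)
#   >>> ts_meta = FakeTS('1400000001.00000', 140000100000000)
#   >>> encode_missing('9d41d8cd98f00b204e9800998ecf0abc', ts_data)
#   '9d41d8cd98f00b204e9800998ecf0abc 1400000000.00000'
#   >>> encode_missing('9d41d8cd98f00b204e9800998ecf0abc', ts_data, ts_meta)
#   '9d41d8cd98f00b204e9800998ecf0abc 1400000000.00000 m:5f5e100'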
def decode_wanted(parts):
"""
Parse missing_check line parts to determine which parts of local
diskfile were wanted by the receiver.
The encoder for parts is
:py:func:`~swift.obj.ssync_receiver.encode_wanted`
"""
wanted = {}
key_map = dict(d='data', m='meta')
if parts:
# receiver specified data and/or meta wanted, so use those as
# conditions for sending PUT and/or POST subrequests
for k in key_map:
if k in parts[0]:
wanted[key_map[k]] = True
if not wanted:
# assume legacy receiver which will only accept PUTs. There is no
# way to send any meta file content without morphing the timestamp
# of either the data or the metadata, so we just send data file
# content to a legacy receiver. Once the receiver gets updated we
# will be able to send it the meta file content.
wanted['data'] = True
return wanted
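# Example behaviour (illustrative only): the receiver's "wanted" token is
# decoded into the parts it asked for; an empty parts list means a legacy
# receiver that only accepts PUTs.
#
#   >>> sorted(decode_wanted(['dm']).items())
#   [('data', True), ('meta', True)]
#   >>> decode_wanted(['d'])
#   {'data': True}
#   >>> decode_wanted([])
#   {'data': True}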
class Sender(object):
"""
Sends SSYNC requests to the object server.
These requests are eventually handled by
:py:mod:`.ssync_receiver` and full documentation about the
process is there.
"""
def __init__(self, daemon, node, job, suffixes, remote_check_objs=None):
self.daemon = daemon
self.df_mgr = self.daemon._diskfile_mgr
self.node = node
self.job = job
self.suffixes = suffixes
self.connection = None
self.response = None
self.response_buffer = ''
self.response_chunk_left = 0
# available_map has an entry for each object in given suffixes that
# is available to be sync'd; each entry is a hash => dict of timestamps
# of data file or tombstone file and/or meta file
self.available_map = {}
        # When remote_check_objs is given in the job, ssync_sender only
        # tries to check whether those objects exist on the remote node.
self.remote_check_objs = remote_check_objs
# send_map has an entry for each object that the receiver wants to
# be sync'ed; each entry maps an object hash => dict of wanted parts
self.send_map = {}
self.failures = 0
def __call__(self):
"""
Perform ssync with remote node.
:returns: a 2-tuple, in the form (success, can_delete_objs) where
success is a boolean and can_delete_objs is the map of
objects that are in sync with the receiver. Each entry in
can_delete_objs maps a hash => timestamp of data file or
tombstone file
"""
if not self.suffixes:
return True, {}
try:
# Double try blocks in case our main error handler fails.
try:
# The general theme for these functions is that they should
# raise exceptions.MessageTimeout for client timeouts and
# exceptions.ReplicationException for common issues that will
# abort the replication attempt and log a simple error. All
# other exceptions will be logged with a full stack trace.
self.connect()
self.missing_check()
if self.remote_check_objs is None:
self.updates()
can_delete_obj = self.available_map
else:
# when we are initialized with remote_check_objs we don't
# *send* any requested updates; instead we only collect
# what's already in sync and safe for deletion
in_sync_hashes = (set(self.available_map.keys()) -
set(self.send_map.keys()))
can_delete_obj = dict((hash_, self.available_map[hash_])
for hash_ in in_sync_hashes)
if not self.failures:
return True, can_delete_obj
else:
return False, {}
except (exceptions.MessageTimeout,
exceptions.ReplicationException) as err:
self.daemon.logger.error(
'%s:%s/%s/%s %s', self.node.get('replication_ip'),
self.node.get('replication_port'), self.node.get('device'),
self.job.get('partition'), err)
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
self.daemon.logger.exception(
'%s:%s/%s/%s EXCEPTION in replication.Sender',
self.node.get('replication_ip'),
self.node.get('replication_port'),
self.node.get('device'), self.job.get('partition'))
finally:
self.disconnect()
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
# This particular exception handler does the minimal amount as it
# would only get called if the above except Exception handler
# failed (bad node or job data).
self.daemon.logger.exception('EXCEPTION in replication.Sender')
return False, {}
def connect(self):
"""
Establishes a connection and starts an SSYNC request
with the object server.
"""
with exceptions.MessageTimeout(
self.daemon.conn_timeout, 'connect send'):
self.connection = bufferedhttp.BufferedHTTPConnection(
'%s:%s' % (self.node['replication_ip'],
self.node['replication_port']))
self.connection.putrequest('SSYNC', '/%s/%s' % (
self.node['device'], self.job['partition']))
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
int(self.job['policy']))
# a sync job must use the node's index for the frag_index of the
# rebuilt fragments instead of the frag_index from the job which
# will be rebuilding them
frag_index = self.node.get('index', self.job.get('frag_index'))
if frag_index is None:
# replication jobs will not have a frag_index key;
# reconstructor jobs with only tombstones will have a
# frag_index key explicitly set to the value of None - in both
# cases on the wire we write the empty string which
# ssync_receiver will translate to None
frag_index = ''
self.connection.putheader('X-Backend-Ssync-Frag-Index',
frag_index)
# a revert job to a handoff will not have a node index
self.connection.putheader('X-Backend-Ssync-Node-Index',
self.node.get('index', ''))
self.connection.endheaders()
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'connect receive'):
self.response = self.connection.getresponse()
if self.response.status != http.HTTP_OK:
err_msg = self.response.read()[:1024]
raise exceptions.ReplicationException(
'Expected status %s; got %s (%s)' %
(http.HTTP_OK, self.response.status, err_msg))
def readline(self):
"""
Reads a line from the SSYNC response body.
httplib has no readline and will block on read(x) until x is
read, so we have to do the work ourselves. A bit of this is
taken from Python's httplib itself.
"""
data = self.response_buffer
self.response_buffer = ''
while '\n' not in data and len(data) < self.daemon.network_chunk_size:
if self.response_chunk_left == -1: # EOF-already indicator
break
if self.response_chunk_left == 0:
line = self.response.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
self.response_chunk_left = int(line.strip(), 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.response.close()
raise exceptions.ReplicationException('Early disconnect')
if self.response_chunk_left == 0:
self.response_chunk_left = -1
break
chunk = self.response.fp.read(min(
self.response_chunk_left,
self.daemon.network_chunk_size - len(data)))
if not chunk:
# close the connection as protocol synchronisation is
# probably lost
self.response.close()
raise exceptions.ReplicationException('Early disconnect')
self.response_chunk_left -= len(chunk)
if self.response_chunk_left == 0:
self.response.fp.read(2) # discard the trailing \r\n
data += chunk
if '\n' in data:
data, self.response_buffer = data.split('\n', 1)
data += '\n'
return data
def missing_check(self):
"""
Handles the sender-side of the MISSING_CHECK step of a
SSYNC request.
Full documentation of this can be found at
:py:meth:`.Receiver.missing_check`.
"""
# First, send our list.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check start'):
msg = ':MISSING_CHECK: START\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
hash_gen = self.df_mgr.yield_hashes(
self.job['device'], self.job['partition'],
self.job['policy'], self.suffixes,
frag_index=self.job.get('frag_index'))
if self.remote_check_objs is not None:
hash_gen = ifilter(
lambda path_objhash_timestamps:
path_objhash_timestamps[1] in
self.remote_check_objs, hash_gen)
for path, object_hash, timestamps in hash_gen:
self.available_map[object_hash] = timestamps
with exceptions.MessageTimeout(
self.daemon.node_timeout,
'missing_check send line'):
msg = '%s\r\n' % encode_missing(object_hash, **timestamps)
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check end'):
msg = ':MISSING_CHECK: END\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
# Now, retrieve the list of what they want.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check start wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':MISSING_CHECK: START':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check line wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':MISSING_CHECK: END':
break
parts = line.split()
if parts:
self.send_map[parts[0]] = decode_wanted(parts[1:])
def updates(self):
"""
Handles the sender-side of the UPDATES step of an SSYNC
request.
Full documentation of this can be found at
:py:meth:`.Receiver.updates`.
"""
# First, send all our subrequests based on the send_map.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates start'):
msg = ':UPDATES: START\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
for object_hash, want in self.send_map.items():
object_hash = urllib.parse.unquote(object_hash)
try:
df = self.df_mgr.get_diskfile_from_hash(
self.job['device'], self.job['partition'], object_hash,
self.job['policy'], frag_index=self.job.get('frag_index'))
except exceptions.DiskFileNotExist:
continue
url_path = urllib.parse.quote(
'/%s/%s/%s' % (df.account, df.container, df.obj))
try:
df.open()
if want.get('data'):
# EC reconstructor may have passed a callback to build an
# alternative diskfile - construct it using the metadata
# from the data file only.
df_alt = self.job.get(
'sync_diskfile_builder', lambda *args: df)(
self.job, self.node, df.get_datafile_metadata())
self.send_put(url_path, df_alt)
if want.get('meta') and df.data_timestamp != df.timestamp:
self.send_post(url_path, df)
except exceptions.DiskFileDeleted as err:
if want.get('data'):
self.send_delete(url_path, err.timestamp)
except exceptions.DiskFileError:
pass
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates end'):
msg = ':UPDATES: END\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
# Now, read their response for any issues.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates start wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':UPDATES: START':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates line wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':UPDATES: END':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
def send_delete(self, url_path, timestamp):
"""
Sends a DELETE subrequest with the given information.
"""
msg = ['DELETE ' + url_path, 'X-Timestamp: ' + timestamp.internal]
msg = '\r\n'.join(msg) + '\r\n\r\n'
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'send_delete'):
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
def send_put(self, url_path, df):
"""
Sends a PUT subrequest for the url_path using the source df
(DiskFile) and content_length.
"""
msg = ['PUT ' + url_path, 'Content-Length: ' + str(df.content_length)]
# Sorted to make it easier to test.
for key, value in sorted(df.get_datafile_metadata().items()):
if key not in ('name', 'Content-Length'):
msg.append('%s: %s' % (key, value))
msg = '\r\n'.join(msg) + '\r\n\r\n'
with exceptions.MessageTimeout(self.daemon.node_timeout, 'send_put'):
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
for chunk in df.reader():
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'send_put chunk'):
self.connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
def send_post(self, url_path, df):
metadata = df.get_metafile_metadata()
if metadata is None:
return
msg = ['POST ' + url_path]
# Sorted to make it easier to test.
for key, value in sorted(metadata.items()):
msg.append('%s: %s' % (key, value))
msg = '\r\n'.join(msg) + '\r\n\r\n'
with exceptions.MessageTimeout(self.daemon.node_timeout, 'send_post'):
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
def disconnect(self):
"""
Closes down the connection to the object server once done
with the SSYNC request.
"""
if not self.connection:
return
try:
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'disconnect'):
self.connection.send('0\r\n\r\n')
except (Exception, exceptions.Timeout):
pass # We're okay with the above failing.
self.connection.close()
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of converters between db models, Python and JSON dictionaries, etc."""
__author__ = 'Pavel Simakov ([email protected])'
import datetime
import json
from google.appengine.ext import db
JSON_DATE_FORMAT = '%Y/%m/%d'
JSON_TYPES = ['string', 'date', 'text', 'html', 'boolean', 'integer', 'array']
# Prefix to add to all JSON responses to guard against XSSI. Must be kept in
# sync with modules/oeditor/oeditor.html.
_JSON_XSSI_PREFIX = ")]}'\n"
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
SUPPORTED_TYPES = (db.GeoPt, datetime.date)
def dict_to_json(source_dict, unused_schema):
"""Converts Python dictionary into JSON dictionary using schema."""
output = {}
for key, value in source_dict.items():
if value is None or isinstance(value, SIMPLE_TYPES):
output[key] = value
elif isinstance(value, datetime.date):
output[key] = value.strftime(JSON_DATE_FORMAT)
elif isinstance(value, db.GeoPt):
output[key] = {'lat': value.lat, 'lon': value.lon}
else:
raise ValueError(
'Failed to encode key \'%s\' with value \'%s\'.' % (key, value))
return output
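# A small worked example (illustrative only; the schema argument is unused by
# this helper, so ``None`` is passed):
#
#   >>> out = dict_to_json({'due': datetime.date(2012, 3, 28), 'count': 3},
#   ...                    None)
#   >>> sorted(out.items())
#   [('count', 3), ('due', '2012/03/28')]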
def dumps(*args, **kwargs):
"""Wrapper around json.dumps.
No additional behavior; present here so this module is a drop-in replacement
for json.dumps|loads. Clients should never use json.dumps|loads directly.
See usage docs at http://docs.python.org/2/library/json.html.
Args:
*args: positional arguments delegated to json.dumps.
**kwargs: keyword arguments delegated to json.dumps.
Returns:
string. The converted JSON.
"""
return json.dumps(*args, **kwargs)
def loads(s, prefix=_JSON_XSSI_PREFIX, **kwargs):
"""Wrapper around json.loads that handles XSSI-protected responses.
To prevent XSSI we insert a prefix before our JSON responses during server-
side rendering. This loads() removes the prefix and should always be used in
place of json.loads. See usage docs at
http://docs.python.org/2/library/json.html.
Args:
s: str or unicode. JSON contents to convert.
prefix: string. The XSSI prefix we remove before conversion.
**kwargs: keyword arguments delegated to json.loads.
Returns:
object. Python object reconstituted from the given JSON string.
"""
    if s.startswith(prefix):
        # Remove the prefix itself; str.lstrip() would treat the prefix as a
        # set of characters to strip rather than as a literal string.
        s = s[len(prefix):]
return json.loads(s, **kwargs)
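# Round-trip sketch (illustrative only): loads() transparently removes the
# XSSI guard prefix that the server prepends to JSON responses.
#
#   >>> payload = _JSON_XSSI_PREFIX + dumps({'status': 200})
#   >>> loads(payload)['status']
#   200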
def json_to_dict(source_dict, schema):
"""Converts JSON dictionary into Python dictionary using schema."""
output = {}
for key, attr in schema['properties'].items():
# Skip schema elements that don't exist in source.
        if key not in source_dict:
continue
attr_type = attr['type']
        if attr_type not in JSON_TYPES:
raise ValueError('Unsupported JSON type: %s' % attr_type)
if attr_type == 'date':
output[key] = datetime.datetime.strptime(
source_dict[key], JSON_DATE_FORMAT).date()
elif attr_type == 'array':
subschema = attr['items']
array = []
for item in source_dict[key]:
array.append(json_to_dict(item, subschema))
output[key] = array
else:
output[key] = source_dict[key]
return output
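# A small worked example (illustrative only; the hypothetical schema below
# follows the {'properties': {name: {'type': ...}}} layout this function
# expects):
#
#   >>> schema = {'properties': {'title': {'type': 'string'},
#   ...                          'due': {'type': 'date'}}}
#   >>> out = json_to_dict({'title': 'HW1', 'due': '2012/03/28'}, schema)
#   >>> out['title'], out['due']
#   ('HW1', datetime.date(2012, 3, 28))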
def entity_to_dict(entity):
"""Puts model object attributes into a Python dictionary."""
output = {}
for key, prop in entity.properties().iteritems():
value = getattr(entity, key)
if value is None or isinstance(value, SIMPLE_TYPES) or isinstance(
value, SUPPORTED_TYPES):
output[key] = value
else:
raise ValueError('Failed to encode: %s' % prop)
# explicitly add entity key as a 'string' attribute
output['key'] = str(entity.key())
return output
def dict_to_entity(entity, source_dict):
"""Sets model object attributes from a Python dictionary."""
for key, value in source_dict.items():
if value is None or isinstance(value, SIMPLE_TYPES) or isinstance(
value, SUPPORTED_TYPES):
setattr(entity, key, value)
else:
raise ValueError('Failed to encode: %s' % value)
return entity
def string_to_value(string, value_type):
"""Converts string representation to a value."""
if value_type == str:
if not string:
return ''
else:
return string
elif value_type == bool:
if string == '1' or string == 'True' or string == 1:
return True
else:
return False
elif value_type == int or value_type == long:
if not string:
return 0
else:
return long(string)
else:
raise ValueError('Unknown type: %s' % value_type)
def value_to_string(value, value_type):
"""Converts value to a string representation."""
if value_type == str:
return value
elif value_type == bool:
if value:
return 'True'
else:
return 'False'
elif value_type == int or value_type == long:
return str(value)
else:
raise ValueError('Unknown type: %s' % value_type)
def dict_to_instance(adict, instance):
"""Populates instance attributes using data dictionary."""
for key, unused_value in instance.__dict__.iteritems():
if not key.startswith('_'):
setattr(instance, key, adict[key])
def instance_to_dict(instance):
"""Populates data dictionary from instance attrs."""
adict = {}
for key, unused_value in instance.__dict__.iteritems():
if not key.startswith('_'):
adict[key] = getattr(instance, key)
return adict
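# Example behaviour (illustrative only, with a throwaway class): attributes
# with a leading underscore are skipped in both directions.
#
#   >>> class Prefs(object):
#   ...     def __init__(self):
#   ...         self.theme = 'dark'
#   ...         self._secret = 'xyz'
#   >>> instance_to_dict(Prefs())
#   {'theme': 'dark'}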
def send_json_response(
handler, status_code, message, payload_dict=None, xsrf_token=None):
"""Formats and sends out a JSON REST response envelope and body."""
handler.response.headers['Content-Type'] = 'application/json, charset=utf-8'
response = {}
response['status'] = status_code
response['message'] = message
if payload_dict:
response['payload'] = dumps(payload_dict)
if xsrf_token:
response['xsrf_token'] = xsrf_token
handler.response.write(_JSON_XSSI_PREFIX + dumps(response))
def run_all_unit_tests():
"""Runs all unit tests."""
assert value_to_string(True, bool) == 'True'
assert value_to_string(False, bool) == 'False'
assert value_to_string(None, bool) == 'False'
assert string_to_value('True', bool)
assert string_to_value('1', bool)
assert string_to_value(1, bool)
assert not string_to_value('False', bool)
assert not string_to_value('0', bool)
assert not string_to_value('5', bool)
assert not string_to_value(0, bool)
assert not string_to_value(5, bool)
assert not string_to_value(None, bool)
assert string_to_value('15', int) == 15
assert string_to_value(15, int) == 15
assert string_to_value(None, int) == 0
assert string_to_value('foo', str) == 'foo'
assert string_to_value(None, str) == str('')
if __name__ == '__main__':
run_all_unit_tests()
|
|
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
from uuid import UUID
class HMoveOneOutputIndirectMatchDiffRulesRHS(HimesisPostConditionPattern):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMoveOneOutputIndirectMatchDiffRulesRHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMoveOneOutputIndirectMatchDiffRulesRHS, self).__init__(name='HMoveOneOutputIndirectMatchDiffRulesRHS', num_nodes=4, edges=[])
# Add the edges
self.add_edges([(1, 0), (0, 3)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_post__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_action__"] = """#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
"""
self["name"] = """"""
self["GUID__"] = UUID('1006ed39-28da-4560-9a31-36c6e493a483')
# Set the node attributes
self.vs[0]["MT_label__"] = """19"""
self.vs[0]["mm__"] = """MT_post__indirectLink_S"""
self.vs[0]["GUID__"] = UUID('f763e1b9-1e85-4183-bb2f-6caad388f27b')
self.vs[1]["MT_pivotOut__"] = """element1"""
self.vs[1]["MT_label__"] = """3"""
self.vs[1]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[1]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["GUID__"] = UUID('dd7911d8-0d49-4263-8474-acba1e390ae9')
self.vs[2]["MT_pivotOut__"] = """element2"""
self.vs[2]["MT_label__"] = """4"""
self.vs[2]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[2]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["GUID__"] = UUID('f8efeb0c-a2c2-40a3-9bf2-fd2d5b712b46')
self.vs[3]["MT_label__"] = """5"""
self.vs[3]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[3]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[3]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[3]["GUID__"] = UUID('239628de-5ce8-4111-bc7f-01964486dd05')
from HMoveOneOutputIndirectMatchDiffRulesLHS import HMoveOneOutputIndirectMatchDiffRulesLHS
self.pre = HMoveOneOutputIndirectMatchDiffRulesLHS()
def action(self, PostNode, graph):
"""
Executable post-action code.
@param PostNode: Function taking a node label as parameter
and returning the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
def execute(self, packet, match):
"""
Transforms the current match of the packet according to this rule.
Pivots are also assigned, if any.
@param packet: The input packet.
@param match: The match to rewrite.
"""
graph = packet.graph
# Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite.
# Because of the uniqueness property of labels in a rule, we can store all LHS labels
# and subsequently add the labels corresponding to the nodes to be created.
labels = match.copy()
#===============================================================================
# Update attribute values
#===============================================================================
#===============================================================================
# Create new nodes
#===============================================================================
# indirectLink_S19
new_node = graph.add_node()
labels['19'] = new_node
graph.vs[new_node][Himesis.Constants.META_MODEL] = 'indirectLink_S'
#===============================================================================
# Create new edges
#===============================================================================
# MetaModelElement_S3 -> indirectLink_S19
graph.add_edges((labels['3'], labels['19']))
# indirectLink_S19 -> MetaModelElement_S5
graph.add_edges((labels['19'], labels['5']))
#===============================================================================
# Set the output pivots
#===============================================================================
# MetaModelElement_S3
packet.global_pivots['element1'] = graph.vs[labels['3']][Himesis.Constants.GUID]
# MetaModelElement_S4
packet.global_pivots['element2'] = graph.vs[labels['4']][Himesis.Constants.GUID]
#===============================================================================
# Perform the post-action
#===============================================================================
try:
self.action(lambda i: graph.vs[labels[i]], graph)
except Exception, e:
raise Exception('An error has occurred while applying the post-action', e)
#===============================================================================
# Finally, delete nodes (this will automatically delete the adjacent edges)
#===============================================================================
# MT_pre__indirectLink_S9
graph.delete_nodes([labels["9"]])
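# --- Illustrative driver sketch (not part of the generated rule; the matcher
# call below is a hypothetical placeholder, shown only to make the control flow
# explicit -- in practice the surrounding transformation framework supplies the
# packet and the match) ---
#
#   rule = <this rule class>()
#   for match in matcher.find(rule.pre, packet.graph):  # hypothetical matcher
#       rule.execute(packet, match)                     # rewrites packet.graph in place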
|
|
from flask import redirect, render_template, request, \
url_for, Blueprint, jsonify, session, Response
from project import utils, database_wrapper
from project.services.auth import Auth
from flask.ext.api import FlaskAPI, exceptions
from flask.ext.api.status import *
from models import user
from bson.objectid import ObjectId
import sys
import json
blueprint = Blueprint(
'users', __name__
)
@blueprint.route('/users/<user_id>', methods=['PUT'])
@Auth.require(Auth.USER)
@Auth.only_me
def user_basic_info(user_id):
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
req = request.get_json()
try:
utils.mergeFrom(req, entry, user.User.basic_info_fields, require=False)
database_wrapper.save_entity(entry)
except:
return jsonify(error='Invalid key'), HTTP_400_BAD_REQUEST
return '', HTTP_200_OK
@blueprint.route('/users/<user_id>', methods=['GET'])
@Auth.require(Auth.USER)
def userBasicInfo(user_id):
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
return user.get_basic_info_with_security(entry)
@blueprint.route('/users/<user_id>/<attribute>', methods=['DELETE'])
@Auth.require(Auth.USER)
@Auth.only_me
def delete_basic_user_info(user_id, attribute):
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
try:
entry[attribute] = None
database_wrapper.save_entity(entry)
except:
print sys.exc_info()[0]
return jsonify(error='Invalid key or field cannot be deleted'), HTTP_400_BAD_REQUEST
#return empty response with 200 status ok
return '', HTTP_200_OK
@blueprint.route('/users/<user_id>/details', methods=['GET'])
@Auth.require(Auth.USER)
def get_user_details(user_id):
try:
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
return user.get_user_details(entry, user_id == 'me')
except Exception as e:
return jsonify(error=str(e)), HTTP_500_INTERNAL_SERVER_ERROR
@blueprint.route('/users/<user_id>/details/skills', methods=['PUT'])
@Auth.require(Auth.USER)
@Auth.only_me
def put_skills(user_id):
"""
Example request:
PUT
{
"skills":["javascript"]
}
returns STATUS_200_OK when successful
"""
req = request.get_json()
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
if user.put_skills(entry, req):
return '', HTTP_200_OK
else:
return '', HTTP_400_BAD_REQUEST
@blueprint.route('/users/<user_id>/details/interests', methods=['PUT'])
@Auth.require(Auth.USER)
@Auth.only_me
def put_interests(user_id):
"""
Example request:
PUT
{
"interests":[{"title":"some title", "description":"some description"},
{"title":"some title 2", "description":"some description 2"}]
}
returns STATUS_200_OK when successful
"""
req = request.get_json()
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
if user.put_interests(entry, req):
return '', HTTP_200_OK
else:
return '', HTTP_400_BAD_REQUEST
@blueprint.route('/users/<user_id>/details/projects', methods=['PUT'])
@Auth.require(Auth.USER)
@Auth.only_me
def put_projects(user_id):
"""
Example request:
PUT
{
"projects":[{
"date": "May 2015",
"title": "some project title",
"description": "some project description",
"details": [{
"title": "some project detail title",
"description": "some project detail description"
}],
"people":["54b797090adfa96230c2c1bb"]
}]
}
returns status 200_OK when successful
"""
req = request.get_json()
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
if user.put_projects(entry, req):
return '', HTTP_200_OK
else:
return '', HTTP_400_BAD_REQUEST
@blueprint.route('/users/<user_id>/edges', methods=['GET'])
@Auth.require(Auth.USER)
def userEdges(user_id):
entry = user.findUserByID(user_id)
if entry is None:
return '', HTTP_404_NOT_FOUND
suggested_connections = []
pending_connections = []
pending_connections_messages = {}
if user_id == 'me':
suggested_connection_users = user.get_suggested_connections(entry)
suggested_connections = user.get_basic_info_from_users(suggested_connection_users)
pending = user.get_pending_connections(entry)
pending_connection_ids = [ObjectId(connection['user']) for connection in pending]
pending_connections = user.get_basic_info_from_ids(pending_connection_ids)
for connection in pending:
pending_connections_messages[connection['user']] = connection['message']
connection_ids = map(ObjectId, user.get_connections(entry))
connections = user.get_basic_info_from_ids(connection_ids)
annotated = {'connections': connections,
'suggestedConnections': suggested_connections,
'pendingConnections': pending_connections,
'pendingConnectionsMessages': pending_connections_messages,
'associations': []}
return jsonify(**annotated)
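# Example response shape for the route above (illustrative values only):
# {"connections": [...], "suggestedConnections": [...], "pendingConnections": [...],
#  "pendingConnectionsMessages": {"<user_id>": "<message>"}, "associations": []}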
@blueprint.route('/users/<user_id>/edges/connections', methods=['POST'])
@Auth.require(Auth.USER)
@Auth.only_me
def add_connection_route(user_id):
req = request.get_json()
# TODO: have some system so friend requests are sent
connection_id = req.get('user')
connection_message = req.get('message')
if connection_id is None:
return jsonify(error='missing field \'user\''), HTTP_400_BAD_REQUEST
other_user = user.findUserByID(connection_id)
if other_user is None:
return jsonify(error='bad user'), HTTP_400_BAD_REQUEST
try:
## TODO: improve specificity of errors
user.handle_connection(other_user, connection_message)
return '{}', HTTP_200_OK
except Exception as e:
return jsonify(error=str(e)), HTTP_500_INTERNAL_SERVER_ERROR
@blueprint.route('/users/<user_id>/edges/connections/<connection_id>', methods=['DELETE'])
@Auth.require(Auth.USER)
@Auth.only_me
def remove_connection_route(user_id, connection_id):
entry = user.findUserByID(user_id)
connection = user.findUserByID(connection_id)
if entry is None or connection is None:
return '', HTTP_404_NOT_FOUND
try:
## TODO: improve specificity of errors
user.remove_user_connection(connection)
return '{}', HTTP_200_OK
except Exception as e:
return jsonify(error=str(e)), HTTP_500_INTERNAL_SERVER_ERROR
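# --- Usage sketch (illustrative; the standalone app below is an assumption,
# not part of this module -- normally the blueprint is registered by the
# project's app factory) ---
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)              # hypothetical standalone app
    app.register_blueprint(blueprint)  # exposes the /users/... routes defined above
    print(app.url_map)                 # list the registered endpoints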
|
|
# -*- coding: utf-8 -*-
"""
zkb.bodygenerators
~~~~~~~~~~~~~~~~~~
Converter from payload of input file to body content of output.
:Copyright: Copyright 2014 Yang LIU <[email protected]>
:License: BSD, see LICENSE for details.
"""
import re
import os
import hashlib
from markdown import Markdown
from markdown.inlinepatterns import LinkPattern
from markdown.inlinepatterns import ReferencePattern
from markdown.inlinepatterns import ImagePattern
from markdown.inlinepatterns import ImageReferencePattern
from markdown.inlinepatterns import LINK_RE
from markdown.inlinepatterns import REFERENCE_RE
from markdown.inlinepatterns import IMAGE_LINK_RE
from markdown.inlinepatterns import IMAGE_REFERENCE_RE
from markdown.inlinepatterns import SHORT_REF_RE
from zkb.log import logger
from zkb.mdext.blockformatter import BlockHtmlFormatterExtension
from zkb.mdext.blockformatter import CODE_HIGHLIGHT_CLASS
from zkb.mdext.codeblock import CodeBlockHtmlFormatter
from zkb.mdext.latexblock import LatexBlockHtmlFormatter
_REMOTE_LINK_PATTERN = re.compile(r'(\w+:)?//.+')
_BLOCK_SIZE = 65536
class RelocatingImagePattern(ImagePattern):
"""Extension of Markdown image pattern to record local references of
images and replace them with correct file path.
"""
def __init__(self, pattern, md_instance, relocator):
super(RelocatingImagePattern, self).__init__(pattern, md_instance)
self.relocator = relocator
def handleMatch(self, m):
el = super(RelocatingImagePattern, self).handleMatch(m)
# Check 'src' for local files.
src = el.attrib.get('src')
target = self.relocator.relocate(src)
if target is not None:
el.attrib['src'] = target
return el
class RelocatingImageReferencePattern(ImageReferencePattern):
"""Extension of Markdown image reference pattern to record local references
of images.
.. note:: Links that do not start with 'http://', 'ftp://', etc. are
considered local references. That is, relative links are also treated as
local references because we cannot separate them from links to local
files.
"""
def __init__(self, pattern, md_instance, relocator):
super(RelocatingImageReferencePattern, self).__init__(
pattern, md_instance)
self.relocator = relocator
def makeTag(self, href, title, text):
el = super(RelocatingImageReferencePattern, self).makeTag(
href, title, text)
# Check 'src' for local files.
src = el.attrib.get('src')
target = self.relocator.relocate(src)
if target is not None:
el.attrib['src'] = target
return el
class RelocatingLinkPattern(LinkPattern):
"""Extension of Markdown link pattern to record local references of files.
.. note:: Links that do not start with 'http://', 'ftp://', etc. are
considered local references. That is, relative links are also treated as
local references because we cannot separate them from links to local
files.
"""
def __init__(self, pattern, md_instance, relocator):
super(RelocatingLinkPattern, self).__init__(pattern, md_instance)
self.relocator = relocator
def handleMatch(self, m):
el = super(RelocatingLinkPattern, self).handleMatch(m)
# Check 'href' for local files.
src = el.attrib.get('href')
target = self.relocator.relocate(src)
if target is not None:
el.attrib['href'] = target
return el
class RelocatingReferencePattern(ReferencePattern):
"""Extension of Markdown reference pattern to record local references of
files.
.. note:: Links that do not start with 'http://', 'ftp://', etc. are
considered local references. That is, relative links are also treated as
local references because we cannot separate them from links to local
files.
"""
def __init__(self, pattern, md_instance, relocator):
super(RelocatingReferencePattern, self).__init__(pattern, md_instance)
self.relocator = relocator
def makeTag(self, href, title, text):
el = super(RelocatingReferencePattern, self).makeTag(href, title, text)
# Check 'href' for local files.
src = el.attrib.get('href')
target = self.relocator.relocate(src)
if target is not None:
el.attrib['href'] = target
return el
class ResourceRelocator(object):
"""Relocate resources to a specific directory."""
def __init__(self, base, prefix='resources', url_prefix=None):
super(ResourceRelocator, self).__init__()
self.resources = {}
if os.path.isfile(base):
self.base_dir = os.path.dirname(os.path.abspath(base))
else:
self.base_dir = os.path.abspath(base)
self.prefix = prefix
self.url_prefix = url_prefix
def _should_relocate(self, src):
"""Check whether a reference should be relocated.
:param src: reference source.
:type src: str
:rtype: bool
"""
return (not _REMOTE_LINK_PATTERN.match(src)) and os.path.isfile(src)
def _get_hash(self, filename):
"""Get SHA1 hash of a file.
:param filename: file name.
:type filename: str
:rtype: str
"""
hasher = hashlib.sha1()
try:
with open(filename, 'rb') as stream:
buf = stream.read(_BLOCK_SIZE)
while len(buf) > 0:
hasher.update(buf)
buf = stream.read(_BLOCK_SIZE)
return hasher.hexdigest()
except IOError as e:
logger.debug('Error while hashing file: %s' % e.strerror)
return None
def _get_relocate_dir(self, filename):
"""Relocate a local file into specific directory.
:param filename: file name.
:type filename: str
:return: list
"""
file_hash = self._get_hash(filename)
if file_hash is not None:
out = [self.prefix,
file_hash[0:2],
file_hash[2:4],
file_hash[4:],
os.path.basename(filename)]
if self.url_prefix is not None and len(self.url_prefix) > 0:
out.insert(0, self.url_prefix)
return out
return None
def relocate(self, src):
"""Relocate a resource path for generated site.
:param src: original resource path.
:type src: str
:return: relocated resource path.
:rtype: str
"""
def _to_url(array):
return '/' + '/'.join(array)
if not self._should_relocate(src):
return None
filename = os.path.join(self.base_dir, src)
if filename in self.resources:
return _to_url(self.resources[filename])
target = self._get_relocate_dir(filename)
if target is not None:
self.resources[filename] = target
return _to_url(target)
return None
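# Worked example for ResourceRelocator (hypothetical paths, for illustration only):
#   relocator = ResourceRelocator('posts/entry.md', prefix='resources')
#   relocator.relocate('images/figure.png')
# If 'images/figure.png' is a local file, it is SHA1-hashed and the returned URL
# looks like '/resources/<hash[0:2]>/<hash[2:4]>/<hash[4:]>/figure.png'; the
# mapping is also recorded in relocator.resources so the site builder can copy it.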
class BodyGenerator(object):
"""Base generator class for creating main content of the article from body
of the article file.
"""
def __init__(self):
super(BodyGenerator, self).__init__()
@classmethod
def from_extension(cls, extension, **kwargs):
"""Create a body generator based on file extension.
:param extension: file extension.
:type extension: str
:return: an instance of one child class of BodyGenerator, or None if
the extension cannot be recognized.
:rtype: BodyGenerator
"""
extension = extension.lower()
if extension in _GENERATOR_EXTENSIONS:
return BodyGenerator.from_type(
_GENERATOR_EXTENSIONS[extension], **kwargs)
return None
@classmethod
def from_type(cls, generator_type, **kwargs):
"""Create a body generator based on generator type.
:param generator_type: type of the generator.
:type generator_type: str
:return: an instance of one child class of BodyGenerator, or None if
the body generator type cannot be recognized.
:rtype: BodyGenerator
"""
generator_type = generator_type.lower()
if generator_type in _BODY_GENERATORS:
constructor = globals()[_BODY_GENERATORS[generator_type]]
return constructor(**kwargs)
return None
def generate(self, body, **options):
"""Generate the content from body of the payload of a file.
:param body: payload content.
:type body: str
:return: a tuple of two elements, generated body and metadata
dictionary, respectively.
:rtype: tuple
"""
pass
class MarkdownBodyGenerator(BodyGenerator):
"""Generator for generating body content from Markdown payload.
"""
def generate(self, body, **options):
"""Override to generate content from Markdown payload.
:param body: payload content.
:type body: str
:return: a tuple of two elements, generated body and metadata
dictionary, respectively.
:rtype: tuple
"""
ext = BlockHtmlFormatterExtension({
'code': CodeBlockHtmlFormatter(css_class=CODE_HIGHLIGHT_CLASS),
'latex': LatexBlockHtmlFormatter()
})
md = Markdown(output_format='html5', extensions=[ext])
if 'relocator' in options:
relocator = options['relocator']
elif 'url' in options:
relocator = ResourceRelocator(options['base'],
url_prefix=options['url'][1:-1])
else:
relocator = ResourceRelocator(options['base'])
recorded_patterns = {
'reference': RelocatingReferencePattern(
REFERENCE_RE, md, relocator),
'link': RelocatingLinkPattern(LINK_RE, md, relocator),
'image_link': RelocatingImagePattern(IMAGE_LINK_RE, md, relocator),
'image_reference': RelocatingImageReferencePattern(
IMAGE_REFERENCE_RE, md, relocator),
'short_reference': RelocatingReferencePattern(
SHORT_REF_RE, md, relocator),
}
for pattern, instance in recorded_patterns.iteritems():
md.inlinePatterns[pattern] = instance
output = md.convert(body)
meta = ext.get_requisites()
meta['local_references'] = relocator.resources
return output, meta
class HtmlBodyGenerator(BodyGenerator):
"""Generator for generating body content from HTML payload.
"""
def generate(self, body, **options):
"""Override to generate content from Markdown payload.
:param body: payload content.
:type body: str
:return: a tuple of two elements, generated body and metadata
dictionary, respectively.
:rtype: tuple
"""
return body, {}
_GENERATOR_EXTENSIONS = {
'.md': 'markdown',
'.markdown': 'markdown',
'.html': 'html',
'.htm': 'html'
}
_BODY_GENERATORS = {
'markdown': MarkdownBodyGenerator.__name__,
'html': HtmlBodyGenerator.__name__
}
SUPPORTED_GENERATOR_EXTENSIONS = _GENERATOR_EXTENSIONS.keys()
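# --- Minimal usage sketch (illustrative; the Markdown snippet and base path are
# assumptions, and the markdown/zkb extensions imported above must be available) ---
if __name__ == '__main__':
    generator = BodyGenerator.from_extension('.md')
    html, meta = generator.generate('# Hello\n\nSome *Markdown* text.', base='.')
    print(html)                      # rendered HTML body
    print(meta['local_references'])  # empty here: no local files are referenced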
|
|
import re
import os
import csv
import xml.etree.ElementTree as ET
import logging
import glob
from datetime import datetime
from collections import OrderedDict
from bs4 import BeautifulSoup #html parser
class RunParser(object):
"""Parses an Illumina run folder. It generates data for statusdb
notable attributes :
:RunInfoParser runinfo: see RunInfo
:RunParametersParser runparameters: see RunParametersParser
:SampleSheetParser samplesheet: see SampleSheetParser
:LaneBarcodeParser lanebarcodes: see LaneBarcodeParser
"""
def __init__(self, path):
if os.path.exists(path):
self.log=logging.getLogger(__name__)
self.path=path
self.parse()
self.create_db_obj()
else:
raise os.error(" flowcell cannot be found at {0}".format(path))
def parse(self, demultiplexingDir='Demultiplexing'):
"""Tries to parse as many files as possible from a run folder"""
fc_name=os.path.basename(os.path.abspath(self.path)).split('_')[-1][1:]
rinfo_path=os.path.join(self.path, 'RunInfo.xml')
rpar_path=os.path.join(self.path, 'runParameters.xml')
ss_path=os.path.join(self.path, 'SampleSheet.csv')
lb_path=os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'laneBarcode.html')
ln_path=os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'lane.html')
undeterminedStatsFolder = os.path.join(self.path, demultiplexingDir, "Stats")
cycle_times_log = os.path.join(self.path, 'Logs', "CycleTimes.txt")
try:
self.runinfo=RunInfoParser(rinfo_path)
except OSError as e:
self.log.info(str(e))
self.runinfo=None
try:
self.runparameters=RunParametersParser(rpar_path)
except OSError as e:
self.log.info(str(e))
self.runparameters=None
try:
self.samplesheet=SampleSheetParser(ss_path)
except OSError as e:
self.log.info(str(e))
self.samplesheet=None
try:
self.lanebarcodes=LaneBarcodeParser(lb_path)
except OSError as e:
self.log.info(str(e))
self.lanebarcodes=None
try:
self.lanes=LaneBarcodeParser(ln_path)
except OSError as e:
self.log.info(str(e))
self.lanes=None
try:
self.undet=DemuxSummaryParser(undeterminedStatsFolder)
except OSError as e:
self.log.info(str(e))
self.undet=None
try:
self.time_cycles = CycleTimesParser(cycle_times_log)
except OSError as e:
self.log.info(str(e))
self.time_cycles = None
def create_db_obj(self):
self.obj={}
bits=os.path.basename(os.path.abspath(self.path)).split('_')
name="{0}_{1}".format(bits[0], bits[-1])
self.obj['name']=name
if self.runinfo:
self.obj['RunInfo']=self.runinfo.data
if self.runinfo.recipe:
self.obj['run_setup']=self.runinfo.recipe
if self.runparameters:
self.obj.update(self.runparameters.data)
if self.runparameters.recipe:
self.obj['run_setup']=self.runparameters.recipe
if self.samplesheet:
self.obj['samplesheet_csv']=self.samplesheet.data
if self.lanebarcodes:
self.obj['illumina']={}
self.obj['illumina']['Demultiplex_Stats']={}
self.obj['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']=self.lanebarcodes.sample_data
self.obj['illumina']['Demultiplex_Stats']['Flowcell_stats']=self.lanebarcodes.flowcell_data
if self.lanes:
self.obj.setdefault('illumina', {}).setdefault('Demultiplex_Stats', {})['Lanes_stats']=self.lanes.sample_data
if self.undet:
self.obj['Undetermined']=self.undet.result
if self.time_cycles:
self.obj['time cycles'] = self.time_cycles
class DemuxSummaryParser(object):
def __init__(self, path):
if os.path.exists(path):
self.path=path
self.result={}
self.TOTAL = {}
self.parse()
else:
raise os.error("DemuxSummary folder {0} cannot be found".format(path))
def parse(self):
# Only the 50 most frequent indexes are saved.
pattern=re.compile('DemuxSummaryF1L([0-9]).txt')
for file in glob.glob(os.path.join(self.path, 'DemuxSummaryF1L?.txt')):
lane_nb = pattern.search(file).group(1)
self.result[lane_nb]=OrderedDict()
self.TOTAL[lane_nb] = 0
with open(file, 'rU') as f:
undeterminePart = False
for line in f:
if not undeterminePart:
if "### Columns:" in line:
undeterminePart = True
else:
# We are now reading the index_Sequence / Hit_Count lines.
components = line.rstrip().split('\t')
if len(self.result[lane_nb].keys())< 50:
self.result[lane_nb][components[0]] = int(components[1])
self.TOTAL[lane_nb] += int(components[1])
class LaneBarcodeParser(object):
def __init__(self, path ):
if os.path.exists(path):
self.path=path
self.parse()
else:
raise os.error(" laneBarcode.html cannot be found at {0}".format(path))
def parse(self):
self.sample_data=[]
self.flowcell_data={}
with open(self.path, 'rU') as htmlfile:
bsoup=BeautifulSoup(htmlfile)
flowcell_table=bsoup.find_all('table')[1]
lane_table=bsoup.find_all('table')[2]
keys=[]
values=[]
for th in flowcell_table.find_all('th'):
keys.append(th.text)
for td in flowcell_table.find_all('td'):
values.append(td.text)
self.flowcell_data = dict(zip(keys, values))
keys=[]
rows=lane_table.find_all('tr')
for row in rows[0:]:
if len(row.find_all('th')):
#this is the header row
for th in row.find_all('th'):
key=th.text.replace('<br/>', ' ').replace('&gt;', '>')
keys.append(key)
elif len(row.find_all('td')):
values=[]
for td in row.find_all('td'):
values.append(td.text)
d=dict(zip(keys,values))
self.sample_data.append(d)
class DemultiplexingStatsParser(object):
def __init__(self, path ):
if os.path.exists(path):
self.path=path
self.parse()
else:
raise os.error(" DemultiplexingStats.xml cannot be found at {0}".format(path))
def parse(self):
data={}
tree=ET.parse(self.path)
root = tree.getroot()
self.data=xml_to_dict(root)
class SampleSheetParser(object):
"""Parses Samplesheets, with their fake csv format.
Should be instancied with the samplesheet path as an argument.
.header : a dict containing the info located under the [Header] section
.settings : a dict containing the data from the [Settings] section
.reads : a list of the values in the [Reads] section
.data : a list of the values under the [Data] section. These values are stored in a dict format
.datafields : a list of field names for the data section"""
def __init__(self, path ):
self.log=logging.getLogger(__name__)
if os.path.exists(path):
self.parse(path)
else:
raise os.error(" sample sheet cannot be found at {0}".format(path))
def generate_clean_samplesheet(self, fields_to_remove=None, rename_samples=True, rename_qPCR_suffix = False, fields_qPCR= None):
"""Will generate a 'clean' samplesheet, : the given fields will be removed. if rename_samples is True, samples prepended with 'Sample_'
are renamed to match the sample name"""
output=""
if not fields_to_remove:
fields_to_remove=[]
#Header
output+="[Header]{}".format(os.linesep)
for field in self.header:
output+="{},{}".format(field.rstrip(), self.header[field].rstrip())
output+=os.linesep
#Data
output+="[Data]{}".format(os.linesep)
datafields=[]
for field in self.datafields:
if field not in fields_to_remove:
datafields.append(field)
output+=",".join(datafields)
output+=os.linesep
for line in self.data:
line_ar=[]
for field in datafields:
value = line[field]
if rename_samples and 'SampleID' in field :
try:
if rename_qPCR_suffix and 'SampleName' in fields_qPCR:
#substitute SampleID with SampleName, add Sample_ as prefix and remove __qPCR_ suffix
value =re.sub('__qPCR_$', '', 'Sample_{}'.format(line['SampleName']))
else:
#substitute SampleID with SampleName, add Sample_ as prefix
value ='Sample_{}'.format(line['SampleName'])
except:
#otherwise add Sample_ as prefix
value = 'Sample_{}'.format(line['SampleID'])
elif rename_qPCR_suffix and field in fields_qPCR:
value = re.sub('__qPCR_$', '', line[field])
line_ar.append(value)
output+=",".join(line_ar)
output+=os.linesep
return output
def parse(self, path):
flag=None
header={}
reads=[]
settings=[]
csvlines=[]
data=[]
flag= 'data' #in case of HiSeq samplesheet only data section is present
with open(path, 'rU') as csvfile:
for line in csvfile.readlines():
if '[Header]' in line:
flag='HEADER'
elif '[Reads]' in line:
flag='READS'
elif '[Settings]' in line:
flag='SETTINGS'
elif '[Data]' in line:
flag='data'
else:
if flag == 'HEADER':
try:
header[line.split(',')[0]]=line.split(',')[1]
except IndexError as e:
self.log.error("file {} does not seem to be comma separated.".format(path))
raise RuntimeError("Could not parse the samplesheet, does not seem to be comma separated")
elif flag == 'READS':
reads.append(line.split(',')[0])
elif flag == 'SETTINGS':
settings.append(line.split(',')[0])
elif flag == 'data':
csvlines.append(line)
reader = csv.DictReader(csvlines)
for row in reader:
linedict={}
for field in reader.fieldnames:
linedict[field]=row[field]
data.append(linedict)
self.datafields=reader.fieldnames
self.data=data
self.settings=settings
self.header=header
self.reads=reads
class RunInfoParser(object):
"""Parses RunInfo.xml.
Should be instancied with the file path as an argument.
.data : a dict of hand-picked values:
-Run ID
-Run Number
-Instrument
-Flowcell name
-Run Date
-Reads metadata
-Flowcell layout
"""
def __init__(self, path ):
self.data={}
self.recipe=None
self.path=path
if os.path.exists(path):
self.parse()
else:
raise os.error(" run info cannot be found at {0}".format(path))
def parse(self):
data={}
tree=ET.parse(self.path)
root = tree.getroot()
run=root.find('Run')
data['Id']=run.get('Id')
data['Number']=run.get('Number')
data['Instrument']=run.find('Instrument').text
data['Flowcell']=run.find('Flowcell').text
data['Date']=run.find('Date').text
data['Reads']=[]
for read in run.find('Reads').findall('Read'):
data['Reads'].append(read.attrib)
layout=run.find('FlowcellLayout')
data['FlowcellLayout']=layout.attrib
self.data=data
self.recipe=make_run_recipe(self.data.get('Reads', {}))
def get_read_configuration(self):
"""return a list of dicts containig the Read Configuration
"""
readConfig = []
try:
readConfig = self.data['Reads']
return sorted(readConfig, key=lambda r: int(r.get("Number", 0)))
except KeyError:
raise RuntimeError('Reads section not present in RunInfo. Check the FC folder.')
class RunParametersParser(object):
"""Parses a runParameters.xml file.
This is a much more general XML parser; it will build a dict from the XML data.
Attributes might be replaced if child nodes have the same tag as the attributes.
This does not happen in the current XML file, but if you're planning to reuse this, it may be of interest.
"""
def __init__(self, path ):
self.data={}
self.recipe=None
self.path=path
if os.path.exists(path):
self.parse()
else:
raise os.error(" run parameters cannot be found at {0}".format(path))
def parse(self):
data={}
tree=ET.parse(self.path)
root = tree.getroot()
self.data=xml_to_dict(root)
self.recipe=make_run_recipe(self.data.get('Setup', {}).get('Reads', {}).get('Read', {}))
def make_run_recipe(reads):
"""Based on either runParameters of RunInfo, gathers the information as to how many
readings are done and their length, e.g. 2x150"""
nb_reads=0
nb_indexed_reads=0
numCycles=0
for read in reads:
nb_reads+=1
if read['IsIndexedRead'] == 'Y':
nb_indexed_reads+=1
else:
if numCycles and numCycles != read['NumCycles']:
logging.warn("NumCycles in not coherent")
else:
numCycles = read['NumCycles']
if reads:
return "{0}x{1}".format(nb_reads-nb_indexed_reads, numCycles)
return None
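# Worked example (illustrative input): a paired-end run with one 8-cycle index read,
#   [{'IsIndexedRead': 'N', 'NumCycles': '151'},
#    {'IsIndexedRead': 'Y', 'NumCycles': '8'},
#    {'IsIndexedRead': 'N', 'NumCycles': '151'}]
# yields make_run_recipe(...) == '2x151' (3 reads minus 1 index read, 151 cycles each).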
def xml_to_dict(root):
current=None
children=list(root)
if children:
current={}
duplicates={}
for child in children:
if len(root.findall(child.tag))>1:
if child.tag not in duplicates:
duplicates[child.tag]=[]
lower=xml_to_dict(child)
duplicates[child.tag].extend(lower.values())
current.update(duplicates)
else:
lower=xml_to_dict(child)
current.update(lower)
if root.attrib:
if current:
if [x in current for x in root.attrib]:
current.update(root.attrib)
else:
current.update({'attribs': root.attrib})
else:
current= root.attrib
if root.text and root.text.strip() != "":
if current:
if 'text' not in current:
current['text']=root.text
else:
#you're really pushing here, pal
current['xml_text']=root.text
else:
current=root.text
return {root.tag:current}
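# Worked example (illustrative): for an element such as
#   <Run Id="XYZ"><Date>150101</Date></Run>
# xml_to_dict() returns {'Run': {'Date': '150101', 'Id': 'XYZ'}}: child text becomes
# a value keyed by the child tag, and the element's attributes are merged in as well.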
class CycleTimesParser(object):
def __init__(self, path):
if os.path.exists(path):
self.path = path
self.cycles = []
self.parse()
else:
raise os.error("file {0} cannot be found".format(path))
def parse(self):
"""
Parse CycleTimes.txt and populate self.cycles with an ordered list of cycles.
CycleTimes.txt contains records: <date> <time> <barcode> <cycle> <info>;
one cycle spans several records (grouped by <cycle>).
The parser goes over the records and saves the first record of each cycle as its start time
and the last record of each cycle as its end time.
"""
data = []
date_format = '%m/%d/%Y-%H:%M:%S.%f'
with open(self.path, 'r') as file:
cycle_times = file.readlines()
# if file is empty, return
if not cycle_times:
return
# first line is header, don't read it
for cycle_line in cycle_times[1:]:
# split line into strings
cycle_list = cycle_line.split()
cycle_time_obj = {}
# parse datetime
cycle_time_obj['datetime'] = datetime.strptime("{date}-{time}".format(date=cycle_list[0], time=cycle_list[1]), date_format)
# parse cycle number
cycle_time_obj['cycle'] = int(cycle_list[3])
# add object in the list
data.append(cycle_time_obj)
# take the first record as current cycle
current_cycle = {
'cycle_number': data[0]['cycle'],
'start': data[0]['datetime'],
'end': data[0]['datetime']
}
# compare each record with current cycle (except the first one)
for record in data[1:]:
# if we are at the same cycle
if record['cycle'] == current_cycle['cycle_number']:
# override end of cycle with current record
current_cycle['end'] = record['datetime']
# if a new cycle starts
else:
# save previous cycle
self.cycles.append(current_cycle)
# initialize new current_cycle
current_cycle = {
'cycle_number': record['cycle'],
'start': record['datetime'],
'end': record['datetime']
}
# the last record is not saved inside the loop
if current_cycle not in self.cycles:
self.cycles.append(current_cycle)
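# --- Usage sketch (illustrative; the run-folder path is a placeholder) ---
if __name__ == '__main__':
    run_folder = '/path/to/150101_ST-E00000_0001_AHXXXXXXXX'  # hypothetical path
    if os.path.exists(run_folder):
        parser = RunParser(run_folder)
        # parser.obj now holds the document destined for statusdb
        print(parser.obj.get('name'))
        print(parser.obj.get('run_setup'))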
|
|
# doctest
r''' Test the .npy file format.
Set up:
>>> import sys
>>> from io import BytesIO
>>> from numpy.lib import format
>>>
>>> scalars = [
... np.uint8,
... np.int8,
... np.uint16,
... np.int16,
... np.uint32,
... np.int32,
... np.uint64,
... np.int64,
... np.float32,
... np.float64,
... np.complex64,
... np.complex128,
... object,
... ]
>>>
>>> basic_arrays = []
>>>
>>> for scalar in scalars:
... for endian in '<>':
... dtype = np.dtype(scalar).newbyteorder(endian)
... basic = np.arange(15).astype(dtype)
... basic_arrays.extend([
... np.array([], dtype=dtype),
... np.array(10, dtype=dtype),
... basic,
... basic.reshape((3,5)),
... basic.reshape((3,5)).T,
... basic.reshape((3,5))[::-1,::2],
... ])
...
>>>
>>> Pdescr = [
... ('x', 'i4', (2,)),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> PbufferT = [
... ([3,2], [[6.,4.],[6.,4.]], 8),
... ([4,3], [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> Ndescr = [
... ('x', 'i4', (2,)),
... ('Info', [
... ('value', 'c16'),
... ('y2', 'f8'),
... ('Info2', [
... ('name', 'S2'),
... ('value', 'c16', (2,)),
... ('y3', 'f8', (2,)),
... ('z3', 'u4', (2,))]),
... ('name', 'S2'),
... ('z2', 'b1')]),
... ('color', 'S2'),
... ('info', [
... ('Name', 'U8'),
... ('Value', 'c16')]),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> NbufferT = [
... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> record_arrays = [
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
... ]
Test the magic string writing.
>>> format.magic(1, 0)
'\x93NUMPY\x01\x00'
>>> format.magic(0, 0)
'\x93NUMPY\x00\x00'
>>> format.magic(255, 255)
'\x93NUMPY\xff\xff'
>>> format.magic(2, 5)
'\x93NUMPY\x02\x05'
Test the magic string reading.
>>> format.read_magic(BytesIO(format.magic(1, 0)))
(1, 0)
>>> format.read_magic(BytesIO(format.magic(0, 0)))
(0, 0)
>>> format.read_magic(BytesIO(format.magic(255, 255)))
(255, 255)
>>> format.read_magic(BytesIO(format.magic(2, 5)))
(2, 5)
Test the header writing.
>>> for arr in basic_arrays + record_arrays:
... f = BytesIO()
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
... print(repr(f.getvalue()))
...
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
import sys
import os
import shutil
import tempfile
import warnings
import pytest
from io import BytesIO
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_raises, assert_raises_regex,
assert_warns,
)
from numpy.lib import format
# Generate some basic arrays to test with.
scalars = [
np.uint8,
np.int8,
np.uint16,
np.int16,
np.uint32,
np.int32,
np.uint64,
np.int64,
np.float32,
np.float64,
np.complex64,
np.complex128,
object,
]
basic_arrays = []
for scalar in scalars:
for endian in '<>':
dtype = np.dtype(scalar).newbyteorder(endian)
basic = np.arange(1500).astype(dtype)
basic_arrays.extend([
# Empty
np.array([], dtype=dtype),
# Rank-0
np.array(10, dtype=dtype),
# 1-D
basic,
# 2-D C-contiguous
basic.reshape((30, 50)),
# 2-D F-contiguous
basic.reshape((30, 50)).T,
# 2-D non-contiguous
basic.reshape((30, 50))[::-1, ::2],
])
# More complicated record arrays.
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+
# Structure of a plain array description:
Pdescr = [
('x', 'i4', (2,)),
('y', 'f8', (2, 2)),
('z', 'u1')]
# A plain list of tuples with values for testing:
PbufferT = [
# x y z
([3, 2], [[6., 4.], [6., 4.]], 8),
([4, 3], [[7., 5.], [7., 5.]], 9),
]
# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info |color|info |y|z|
# | +-----+--+----------------+----+--+ +----+-----+ | |
# | |value|y2|Info2 |name|z2| |Name|Value| | |
# | | | +----+-----+--+--+ | | | | | | |
# | | | |name|value|y3|z3| | | | | | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#
# The corresponding nested array description:
Ndescr = [
('x', 'i4', (2,)),
('Info', [
('value', 'c16'),
('y2', 'f8'),
('Info2', [
('name', 'S2'),
('value', 'c16', (2,)),
('y3', 'f8', (2,)),
('z3', 'u4', (2,))]),
('name', 'S2'),
('z2', 'b1')]),
('color', 'S2'),
('info', [
('Name', 'U8'),
('Value', 'c16')]),
('y', 'f8', (2, 2)),
('z', 'u1')]
NbufferT = [
# x Info color info y z
# value y2 Info2 name z2 Name Value
# name value y3 z3
([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
]
record_arrays = [
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
]
#BytesIO that reads a random number of bytes at a time
class BytesIOSRandomSize(BytesIO):
def read(self, size=None):
import random
size = random.randint(1, size)
return super(BytesIOSRandomSize, self).read(size)
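# The three helpers below write an array to an in-memory .npy stream and read it
# back: roundtrip() uses the full buffer, roundtrip_randsize() feeds the reader
# random-sized chunks, and roundtrip_truncated() drops the final byte so that
# reading is expected to fail.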
def roundtrip(arr):
f = BytesIO()
format.write_array(f, arr)
f2 = BytesIO(f.getvalue())
arr2 = format.read_array(f2, allow_pickle=True)
return arr2
def roundtrip_randsize(arr):
f = BytesIO()
format.write_array(f, arr)
f2 = BytesIOSRandomSize(f.getvalue())
arr2 = format.read_array(f2)
return arr2
def roundtrip_truncated(arr):
f = BytesIO()
format.write_array(f, arr)
#BytesIO is one byte short
f2 = BytesIO(f.getvalue()[0:-1])
arr2 = format.read_array(f2)
return arr2
def assert_equal_(o1, o2):
assert_(o1 == o2)
def test_roundtrip():
for arr in basic_arrays + record_arrays:
arr2 = roundtrip(arr)
assert_array_equal(arr, arr2)
def test_roundtrip_randsize():
for arr in basic_arrays + record_arrays:
if arr.dtype != object:
arr2 = roundtrip_randsize(arr)
assert_array_equal(arr, arr2)
def test_roundtrip_truncated():
for arr in basic_arrays:
if arr.dtype != object:
assert_raises(ValueError, roundtrip_truncated, arr)
def test_long_str():
# check items larger than internal buffer size, gh-4027
long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1)))
long_str_arr2 = roundtrip(long_str_arr)
assert_array_equal(long_str_arr, long_str_arr2)
def test_memmap_roundtrip(tmpdir):
for i, arr in enumerate(basic_arrays + record_arrays):
if arr.dtype.hasobject:
# Skip these since they can't be mmap'ed.
continue
# Write it out normally and through mmap.
nfn = os.path.join(tmpdir, f'normal{i}.npy')
mfn = os.path.join(tmpdir, f'memmap{i}.npy')
with open(nfn, 'wb') as fp:
format.write_array(fp, arr)
fortran_order = (
arr.flags.f_contiguous and not arr.flags.c_contiguous)
ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
shape=arr.shape, fortran_order=fortran_order)
ma[...] = arr
ma.flush()
# Check that both of these files' contents are the same.
with open(nfn, 'rb') as fp:
normal_bytes = fp.read()
with open(mfn, 'rb') as fp:
memmap_bytes = fp.read()
assert_equal_(normal_bytes, memmap_bytes)
# Check that reading the file using memmap works.
ma = format.open_memmap(nfn, mode='r')
ma.flush()
def test_compressed_roundtrip(tmpdir):
arr = np.random.rand(200, 200)
npz_file = os.path.join(tmpdir, 'compressed.npz')
np.savez_compressed(npz_file, arr=arr)
with np.load(npz_file) as npz:
arr1 = npz['arr']
assert_array_equal(arr, arr1)
# aligned
dt1 = np.dtype('i1, i4, i1', align=True)
# non-aligned, explicit offsets
dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
'offsets': [1, 6]})
# nested struct-in-struct
dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})
# field with '' name
dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3})
# titles
dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
'offsets': [1, 6], 'titles': ['aa', 'bb']})
# empty
dt6 = np.dtype({'names': [], 'formats': [], 'itemsize': 8})
@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5, dt6])
def test_load_padded_dtype(tmpdir, dt):
arr = np.zeros(3, dt)
for i in range(3):
arr[i] = i + 5
npz_file = os.path.join(tmpdir, 'aligned.npz')
np.savez(npz_file, arr=arr)
with np.load(npz_file) as npz:
arr1 = npz['arr']
assert_array_equal(arr, arr1)
def test_python2_python3_interoperability():
fname = 'win64python2.npy'
path = os.path.join(os.path.dirname(__file__), 'data', fname)
data = np.load(path)
assert_array_equal(data, np.ones(2))
def test_pickle_python2_python3():
# Test that loading object arrays saved on Python 2 works both on
# Python 2 and Python 3 and vice versa
data_dir = os.path.join(os.path.dirname(__file__), 'data')
expected = np.array([None, range, u'\u512a\u826f',
b'\xe4\xb8\x8d\xe8\x89\xaf'],
dtype=object)
for fname in ['py2-objarr.npy', 'py2-objarr.npz',
'py3-objarr.npy', 'py3-objarr.npz']:
path = os.path.join(data_dir, fname)
for encoding in ['bytes', 'latin1']:
data_f = np.load(path, allow_pickle=True, encoding=encoding)
if fname.endswith('.npz'):
data = data_f['x']
data_f.close()
else:
data = data_f
if encoding == 'latin1' and fname.startswith('py2'):
assert_(isinstance(data[3], str))
assert_array_equal(data[:-1], expected[:-1])
# mojibake occurs
assert_array_equal(data[-1].encode(encoding), expected[-1])
else:
assert_(isinstance(data[3], bytes))
assert_array_equal(data, expected)
if fname.startswith('py2'):
if fname.endswith('.npz'):
data = np.load(path, allow_pickle=True)
assert_raises(UnicodeError, data.__getitem__, 'x')
data.close()
data = np.load(path, allow_pickle=True, fix_imports=False,
encoding='latin1')
assert_raises(ImportError, data.__getitem__, 'x')
data.close()
else:
assert_raises(UnicodeError, np.load, path,
allow_pickle=True)
assert_raises(ImportError, np.load, path,
allow_pickle=True, fix_imports=False,
encoding='latin1')
def test_pickle_disallow(tmpdir):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
path = os.path.join(data_dir, 'py2-objarr.npy')
assert_raises(ValueError, np.load, path,
allow_pickle=False, encoding='latin1')
path = os.path.join(data_dir, 'py2-objarr.npz')
with np.load(path, allow_pickle=False, encoding='latin1') as f:
assert_raises(ValueError, f.__getitem__, 'x')
path = os.path.join(tmpdir, 'pickle-disabled.npy')
assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
allow_pickle=False)
@pytest.mark.parametrize('dt', [
np.dtype(np.dtype([('a', np.int8),
('b', np.int16),
('c', np.int32),
], align=True),
(3,)),
np.dtype([('x', np.dtype({'names':['a','b'],
'formats':['i1','i1'],
'offsets':[0,4],
'itemsize':8,
},
(3,)),
(4,),
)]),
np.dtype([('x',
('<f8', (5,)),
(2,),
)]),
np.dtype([('x', np.dtype((
np.dtype((
np.dtype({'names':['a','b'],
'formats':['i1','i1'],
'offsets':[0,4],
'itemsize':8}),
(3,)
)),
(4,)
)))
]),
np.dtype([
('a', np.dtype((
np.dtype((
np.dtype((
np.dtype([
('a', int),
('b', np.dtype({'names':['a','b'],
'formats':['i1','i1'],
'offsets':[0,4],
'itemsize':8})),
]),
(3,),
)),
(4,),
)),
(5,),
)))
]),
])
def test_descr_to_dtype(dt):
dt1 = format.descr_to_dtype(dt.descr)
assert_equal_(dt1, dt)
arr1 = np.zeros(3, dt)
arr2 = roundtrip(arr1)
assert_array_equal(arr1, arr2)
def test_version_2_0():
f = BytesIO()
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
d = np.ones(1000, dtype=dt)
format.write_array(f, d, version=(2, 0))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', UserWarning)
format.write_array(f, d)
assert_(w[0].category is UserWarning)
# check alignment of data portion
f.seek(0)
header = f.readline()
assert_(len(header) % format.ARRAY_ALIGN == 0)
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
# 1.0 requested but data cannot be saved this way
assert_raises(ValueError, format.write_array, f, d, (1, 0))
def test_version_2_0_memmap(tmpdir):
    # requires more than 2 bytes for the header
dt = [(("%d" % i) * 100, float) for i in range(500)]
d = np.ones(1000, dtype=dt)
    tf1 = os.path.join(tmpdir, 'version2_01.npy')
    tf2 = os.path.join(tmpdir, 'version2_02.npy')
# 1.0 requested but data cannot be saved this way
assert_raises(ValueError, format.open_memmap, tf1, mode='w+', dtype=d.dtype,
shape=d.shape, version=(1, 0))
ma = format.open_memmap(tf1, mode='w+', dtype=d.dtype,
shape=d.shape, version=(2, 0))
ma[...] = d
ma.flush()
ma = format.open_memmap(tf1, mode='r')
assert_array_equal(ma, d)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', UserWarning)
ma = format.open_memmap(tf2, mode='w+', dtype=d.dtype,
shape=d.shape, version=None)
assert_(w[0].category is UserWarning)
ma[...] = d
ma.flush()
ma = format.open_memmap(tf2, mode='r')
assert_array_equal(ma, d)
def test_write_version():
f = BytesIO()
arr = np.arange(1)
# These should pass.
format.write_array(f, arr, version=(1, 0))
format.write_array(f, arr)
format.write_array(f, arr, version=None)
format.write_array(f, arr)
format.write_array(f, arr, version=(2, 0))
format.write_array(f, arr)
# These should all fail.
bad_versions = [
(1, 1),
(0, 0),
(0, 1),
(2, 2),
(255, 255),
]
for version in bad_versions:
with assert_raises_regex(ValueError,
'we only support format version.*'):
format.write_array(f, arr, version=version)
bad_version_magic = [
b'\x93NUMPY\x01\x01',
b'\x93NUMPY\x00\x00',
b'\x93NUMPY\x00\x01',
b'\x93NUMPY\x02\x00',
b'\x93NUMPY\x02\x02',
b'\x93NUMPY\xff\xff',
]
malformed_magic = [
b'\x92NUMPY\x01\x00',
b'\x00NUMPY\x01\x00',
b'\x93numpy\x01\x00',
b'\x93MATLB\x01\x00',
b'\x93NUMPY\x01',
b'\x93NUMPY',
b'',
]
def test_read_magic():
s1 = BytesIO()
s2 = BytesIO()
arr = np.ones((3, 6), dtype=float)
format.write_array(s1, arr, version=(1, 0))
format.write_array(s2, arr, version=(2, 0))
s1.seek(0)
s2.seek(0)
version1 = format.read_magic(s1)
version2 = format.read_magic(s2)
assert_(version1 == (1, 0))
assert_(version2 == (2, 0))
assert_(s1.tell() == format.MAGIC_LEN)
assert_(s2.tell() == format.MAGIC_LEN)
def test_read_magic_bad_magic():
for magic in malformed_magic:
f = BytesIO(magic)
assert_raises(ValueError, format.read_array, f)
def test_read_version_1_0_bad_magic():
for magic in bad_version_magic + malformed_magic:
f = BytesIO(magic)
assert_raises(ValueError, format.read_array, f)
def test_bad_magic_args():
assert_raises(ValueError, format.magic, -1, 1)
assert_raises(ValueError, format.magic, 256, 1)
assert_raises(ValueError, format.magic, 1, -1)
assert_raises(ValueError, format.magic, 1, 256)
def test_large_header():
s = BytesIO()
d = {'a': 1, 'b': 2}
format.write_array_header_1_0(s, d)
s = BytesIO()
d = {'a': 1, 'b': 2, 'c': 'x'*256*256}
assert_raises(ValueError, format.write_array_header_1_0, s, d)
def test_read_array_header_1_0():
s = BytesIO()
arr = np.ones((3, 6), dtype=float)
format.write_array(s, arr, version=(1, 0))
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_1_0(s)
assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
def test_read_array_header_2_0():
s = BytesIO()
arr = np.ones((3, 6), dtype=float)
format.write_array(s, arr, version=(2, 0))
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_2_0(s)
assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
def test_bad_header():
# header of length less than 2 should fail
s = BytesIO()
assert_raises(ValueError, format.read_array_header_1_0, s)
s = BytesIO(b'1')
assert_raises(ValueError, format.read_array_header_1_0, s)
# header shorter than indicated size should fail
s = BytesIO(b'\x01\x00')
assert_raises(ValueError, format.read_array_header_1_0, s)
# headers without the exact keys required should fail
d = {"shape": (1, 2),
"descr": "x"}
s = BytesIO()
format.write_array_header_1_0(s, d)
assert_raises(ValueError, format.read_array_header_1_0, s)
d = {"shape": (1, 2),
"fortran_order": False,
"descr": "x",
"extrakey": -1}
s = BytesIO()
format.write_array_header_1_0(s, d)
assert_raises(ValueError, format.read_array_header_1_0, s)
def test_large_file_support(tmpdir):
if (sys.platform == 'win32' or sys.platform == 'cygwin'):
pytest.skip("Unknown if Windows has sparse filesystems")
# try creating a large sparse file
tf_name = os.path.join(tmpdir, 'sparse_file')
try:
        # seeking past the end would work too, but using truncate on Linux
        # increases the chances that the file ends up sparse, so we can
        # avoid actually writing 5 GB of data
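        # 5368709120 bytes == 5 * 1024**3, i.e. a 5 GiB sparse file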
import subprocess as sp
sp.check_call(["truncate", "-s", "5368709120", tf_name])
except Exception:
pytest.skip("Could not create 5GB large file")
# write a small array to the end
with open(tf_name, "wb") as f:
f.seek(5368709120)
d = np.arange(5)
np.save(f, d)
# read it back
with open(tf_name, "rb") as f:
f.seek(5368709120)
r = np.load(f)
assert_array_equal(r, d)
@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
reason="test requires 64-bit system")
@pytest.mark.slow
def test_large_archive(tmpdir):
    # Regression test for saving arrays whose dimensions have a product that
    # doesn't fit in int32. See gh-7598 for details.
try:
a = np.empty((2**30, 2), dtype=np.uint8)
except MemoryError:
pytest.skip("Could not create large file")
fname = os.path.join(tmpdir, "large_archive")
with open(fname, "wb") as f:
np.savez(f, arr=a)
with open(fname, "rb") as f:
new_a = np.load(f)["arr"]
assert_(a.shape == new_a.shape)
def test_empty_npz(tmpdir):
# Test for gh-9989
fname = os.path.join(tmpdir, "nothing.npz")
np.savez(fname)
with np.load(fname) as nps:
pass
def test_unicode_field_names(tmpdir):
# gh-7391
arr = np.array([
(1, 3),
(1, 2),
(1, 3),
(1, 2)
], dtype=[
('int', int),
(u'\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)
])
fname = os.path.join(tmpdir, "unicode.npy")
with open(fname, 'wb') as f:
format.write_array(f, arr, version=(3, 0))
with open(fname, 'rb') as f:
arr2 = format.read_array(f)
assert_array_equal(arr, arr2)
    # check that the user is notified that version 3.0 is selected
with open(fname, 'wb') as f:
with assert_warns(UserWarning):
format.write_array(f, arr, version=None)
@pytest.mark.parametrize('dt, fail', [
(np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
metadata={'some': 'stuff'})]}), True),
(np.dtype(int, metadata={'some': 'stuff'}), False),
(np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False),
# recursive: metadata on the field of a dtype
(np.dtype({'names': ['a', 'b'], 'formats': [
float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
]}), False)
])
def test_metadata_dtype(dt, fail):
# gh-14142
arr = np.ones(10, dtype=dt)
buf = BytesIO()
with assert_warns(UserWarning):
np.save(buf, arr)
buf.seek(0)
if fail:
with assert_raises(ValueError):
np.load(buf)
else:
arr2 = np.load(buf)
# BUG: assert_array_equal does not check metadata
from numpy.lib.format import _has_metadata
assert_array_equal(arr, arr2)
assert _has_metadata(arr.dtype)
assert not _has_metadata(arr2.dtype)
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'grid_layout.ui'
#
# Created: Mon Jun 19 21:36:01 2017
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1100, 782)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(1100, 300))
MainWindow.setAutoFillBackground(False)
self.centralwidget = QtGui.QWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setMinimumSize(QtCore.QSize(1013, 0))
self.centralwidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.centralwidget.setAutoFillBackground(False)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout_6 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.label_instructions_title = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_instructions_title.sizePolicy().hasHeightForWidth())
self.label_instructions_title.setSizePolicy(sizePolicy)
self.label_instructions_title.setMinimumSize(QtCore.QSize(0, 20))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Serif"))
font.setBold(True)
font.setWeight(75)
self.label_instructions_title.setFont(font)
self.label_instructions_title.setObjectName(_fromUtf8("label_instructions_title"))
self.gridLayout_6.addWidget(self.label_instructions_title, 0, 0, 1, 1)
self.label_instructions = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_instructions.sizePolicy().hasHeightForWidth())
self.label_instructions.setSizePolicy(sizePolicy)
self.label_instructions.setMinimumSize(QtCore.QSize(500, 70))
self.label_instructions.setMaximumSize(QtCore.QSize(16777215, 50))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Monospace"))
font.setPointSize(18)
self.label_instructions.setFont(font)
self.label_instructions.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.label_instructions.setFocusPolicy(QtCore.Qt.NoFocus)
self.label_instructions.setToolTip(_fromUtf8(""))
self.label_instructions.setReadOnly(False)
self.label_instructions.setObjectName(_fromUtf8("label_instructions"))
self.gridLayout_6.addWidget(self.label_instructions, 4, 0, 1, 1)
self.label_word_output = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.label_word_output.sizePolicy().hasHeightForWidth())
self.label_word_output.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Serif"))
font.setBold(True)
font.setWeight(75)
self.label_word_output.setFont(font)
self.label_word_output.setTextFormat(QtCore.Qt.PlainText)
self.label_word_output.setObjectName(_fromUtf8("label_word_output"))
self.gridLayout_6.addWidget(self.label_word_output, 7, 0, 1, 1)
self.selected_words_disp = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.selected_words_disp.sizePolicy().hasHeightForWidth())
self.selected_words_disp.setSizePolicy(sizePolicy)
self.selected_words_disp.setMinimumSize(QtCore.QSize(500, 80))
self.selected_words_disp.setMaximumSize(QtCore.QSize(16777215, 80))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Monospace"))
font.setPointSize(12)
self.selected_words_disp.setFont(font)
self.selected_words_disp.setFocusPolicy(QtCore.Qt.NoFocus)
self.selected_words_disp.setToolTip(_fromUtf8(""))
self.selected_words_disp.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.selected_words_disp.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.selected_words_disp.setObjectName(_fromUtf8("selected_words_disp"))
self.gridLayout_6.addWidget(self.selected_words_disp, 8, 0, 1, 1)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.clear_button = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clear_button.sizePolicy().hasHeightForWidth())
self.clear_button.setSizePolicy(sizePolicy)
self.clear_button.setMinimumSize(QtCore.QSize(60, 50))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Serif"))
font.setBold(True)
font.setWeight(75)
self.clear_button.setFont(font)
self.clear_button.setFocusPolicy(QtCore.Qt.NoFocus)
self.clear_button.setCheckable(False)
self.clear_button.setObjectName(_fromUtf8("clear_button"))
self.gridLayout.addWidget(self.clear_button, 2, 0, 1, 1)
self.scrollbar_letter_speed = QtGui.QSlider(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollbar_letter_speed.sizePolicy().hasHeightForWidth())
self.scrollbar_letter_speed.setSizePolicy(sizePolicy)
self.scrollbar_letter_speed.setMinimumSize(QtCore.QSize(200, 0))
self.scrollbar_letter_speed.setMinimum(1)
self.scrollbar_letter_speed.setMaximum(50)
self.scrollbar_letter_speed.setPageStep(1)
self.scrollbar_letter_speed.setProperty("value", 8)
self.scrollbar_letter_speed.setOrientation(QtCore.Qt.Horizontal)
self.scrollbar_letter_speed.setObjectName(_fromUtf8("scrollbar_letter_speed"))
self.gridLayout.addWidget(self.scrollbar_letter_speed, 5, 0, 1, 1)
self.gridLayout_3 = QtGui.QGridLayout()
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.label_letter_speed = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Serif"))
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_letter_speed.setFont(font)
self.label_letter_speed.setObjectName(_fromUtf8("label_letter_speed"))
self.gridLayout_3.addWidget(self.label_letter_speed, 2, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem, 0, 0, 1, 1)
self.gridLayout.addLayout(self.gridLayout_3, 3, 0, 1, 1)
self.button_pause = QtGui.QPushButton(self.centralwidget)
self.button_pause.setMinimumSize(QtCore.QSize(60, 50))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Serif"))
font.setBold(True)
font.setWeight(75)
self.button_pause.setFont(font)
self.button_pause.setFocusPolicy(QtCore.Qt.NoFocus)
self.button_pause.setCheckable(True)
self.button_pause.setObjectName(_fromUtf8("button_pause"))
self.gridLayout.addWidget(self.button_pause, 0, 0, 1, 1)
self.gridLayout_6.addLayout(self.gridLayout, 4, 1, 5, 1)
self.phrase_disp = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(50)
sizePolicy.setHeightForWidth(self.phrase_disp.sizePolicy().hasHeightForWidth())
self.phrase_disp.setSizePolicy(sizePolicy)
self.phrase_disp.setMinimumSize(QtCore.QSize(500, 50))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Monospace"))
font.setPointSize(18)
self.phrase_disp.setFont(font)
self.phrase_disp.setFocusPolicy(QtCore.Qt.NoFocus)
self.phrase_disp.setStyleSheet(_fromUtf8("border-style:ridge;\n"
"border-color: rgb(255, 92, 144);\n"
"border-width:5px;\n"
""))
self.phrase_disp.setObjectName(_fromUtf8("phrase_disp"))
self.gridLayout_6.addWidget(self.phrase_disp, 6, 0, 1, 1)
self.label_phrases = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.label_phrases.sizePolicy().hasHeightForWidth())
self.label_phrases.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Serif"))
font.setBold(True)
font.setWeight(75)
self.label_phrases.setFont(font)
self.label_phrases.setObjectName(_fromUtf8("label_phrases"))
self.gridLayout_6.addWidget(self.label_phrases, 5, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1100, 35))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuMode = QtGui.QMenu(self.menubar)
self.menuMode.setObjectName(_fromUtf8("menuMode"))
self.menuDisplay = QtGui.QMenu(self.menubar)
self.menuDisplay.setObjectName(_fromUtf8("menuDisplay"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.edit_alphabet = QtGui.QAction(MainWindow)
self.edit_alphabet.setObjectName(_fromUtf8("edit_alphabet"))
self.action_click_selections = QtGui.QAction(MainWindow)
self.action_click_selections.setCheckable(True)
self.action_click_selections.setChecked(True)
self.action_click_selections.setObjectName(_fromUtf8("action_click_selections"))
self.action_click_distribution = QtGui.QAction(MainWindow)
self.action_click_distribution.setCheckable(True)
self.action_click_distribution.setChecked(True)
self.action_click_distribution.setObjectName(_fromUtf8("action_click_distribution"))
self.action_best_words = QtGui.QAction(MainWindow)
self.action_best_words.setCheckable(True)
self.action_best_words.setChecked(True)
self.action_best_words.setObjectName(_fromUtf8("action_best_words"))
self.action_open = QtGui.QAction(MainWindow)
self.action_open.setCheckable(False)
self.action_open.setEnabled(True)
self.action_open.setObjectName(_fromUtf8("action_open"))
self.action_letter_likelihoods = QtGui.QAction(MainWindow)
self.action_letter_likelihoods.setCheckable(True)
self.action_letter_likelihoods.setChecked(True)
self.action_letter_likelihoods.setObjectName(_fromUtf8("action_letter_likelihoods"))
self.action_minimum_view = QtGui.QAction(MainWindow)
self.action_minimum_view.setCheckable(True)
self.action_minimum_view.setChecked(False)
self.action_minimum_view.setObjectName(_fromUtf8("action_minimum_view"))
self.action_dictionary = QtGui.QAction(MainWindow)
self.action_dictionary.setCheckable(False)
self.action_dictionary.setObjectName(_fromUtf8("action_dictionary"))
self.action_save = QtGui.QAction(MainWindow)
self.action_save.setCheckable(False)
self.action_save.setEnabled(True)
self.action_save.setObjectName(_fromUtf8("action_save"))
self.action_close = QtGui.QAction(MainWindow)
self.action_close.setObjectName(_fromUtf8("action_close"))
self.action_alphabet = QtGui.QAction(MainWindow)
self.action_alphabet.setCheckable(True)
self.action_alphabet.setChecked(True)
self.action_alphabet.setObjectName(_fromUtf8("action_alphabet"))
self.actionB = QtGui.QAction(MainWindow)
self.actionB.setObjectName(_fromUtf8("actionB"))
self.action_volume = QtGui.QAction(MainWindow)
self.action_volume.setObjectName(_fromUtf8("action_volume"))
self.action_settings = QtGui.QAction(MainWindow)
self.action_settings.setCheckable(False)
self.action_settings.setObjectName(_fromUtf8("action_settings"))
self.action_space_bar = QtGui.QAction(MainWindow)
self.action_space_bar.setCheckable(True)
self.action_space_bar.setChecked(True)
self.action_space_bar.setObjectName(_fromUtf8("action_space_bar"))
self.action_port = QtGui.QAction(MainWindow)
self.action_port.setCheckable(True)
self.action_port.setChecked(True)
self.action_port.setObjectName(_fromUtf8("action_port"))
self.action_about_ticker = QtGui.QAction(MainWindow)
self.action_about_ticker.setObjectName(_fromUtf8("action_about_ticker"))
self.action_clear = QtGui.QAction(MainWindow)
self.action_clear.setObjectName(_fromUtf8("action_clear"))
self.action_calibrate = QtGui.QAction(MainWindow)
self.action_calibrate.setCheckable(True)
self.action_calibrate.setChecked(True)
self.action_calibrate.setObjectName(_fromUtf8("action_calibrate"))
self.action = QtGui.QAction(MainWindow)
self.action.setCheckable(True)
self.action.setObjectName(_fromUtf8("action"))
self.action_tutorial = QtGui.QAction(MainWindow)
self.action_tutorial.setCheckable(True)
self.action_tutorial.setVisible(True)
self.action_tutorial.setObjectName(_fromUtf8("action_tutorial"))
self.action_inc_phrases = QtGui.QAction(MainWindow)
self.action_inc_phrases.setCheckable(True)
self.action_inc_phrases.setChecked(False)
self.action_inc_phrases.setVisible(True)
self.action_inc_phrases.setObjectName(_fromUtf8("action_inc_phrases"))
self.action_fast_mode = QtGui.QAction(MainWindow)
self.action_fast_mode.setCheckable(True)
self.action_fast_mode.setChecked(True)
self.action_fast_mode.setObjectName(_fromUtf8("action_fast_mode"))
self.action_clear_2 = QtGui.QAction(MainWindow)
self.action_clear_2.setObjectName(_fromUtf8("action_clear_2"))
self.action_clear_sentence = QtGui.QAction(MainWindow)
self.action_clear_sentence.setObjectName(_fromUtf8("action_clear_sentence"))
self.menuFile.addAction(self.action_open)
self.menuFile.addAction(self.action_save)
self.menuFile.addAction(self.action_close)
self.menuMode.addAction(self.action_tutorial)
self.menuMode.addAction(self.action_inc_phrases)
self.menuDisplay.addAction(self.action_clear_sentence)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuMode.menuAction())
self.menubar.addAction(self.menuDisplay.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Grid", None))
self.label_instructions_title.setToolTip(_translate("MainWindow", "Visual instructions", None))
self.label_instructions_title.setText(_translate("MainWindow", "Instructions:", None))
self.label_instructions.setStyleSheet(_translate("MainWindow", "border-style:ridge;\n"
"border-color: rgb(92, 114, 255);\n"
"border-width:5px;\n"
"", None))
self.label_word_output.setToolTip(_translate("MainWindow", "Word selections are displayed here", None))
self.label_word_output.setText(_translate("MainWindow", "Output sentences:", None))
self.selected_words_disp.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Monospace\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> </p></body></html>", None))
self.clear_button.setText(_translate("MainWindow", "Restart", None))
        self.label_letter_speed.setToolTip(_translate("MainWindow", "Sound overlap; if 0.0, all voices will speak simultaneously.", None))
        self.label_letter_speed.setText(_translate("MainWindow", "Speed: Scan delay (seconds)", None))
self.button_pause.setText(_translate("MainWindow", "Play", None))
self.label_phrases.setText(_translate("MainWindow", "Phrases:", None))
self.menuFile.setTitle(_translate("MainWindow", "File", None))
self.menuMode.setTitle(_translate("MainWindow", "Mode", None))
self.menuDisplay.setTitle(_translate("MainWindow", "Display", None))
self.edit_alphabet.setText(_translate("MainWindow", "Alphabet", None))
self.action_click_selections.setText(_translate("MainWindow", "Selected letters/words", None))
self.action_click_distribution.setText(_translate("MainWindow", "Click distribution", None))
self.action_best_words.setText(_translate("MainWindow", "Most probable words", None))
self.action_open.setText(_translate("MainWindow", "Open", None))
        self.action_letter_likelihoods.setText(_translate("MainWindow", "Letter likelihoods", None))
self.action_minimum_view.setText(_translate("MainWindow", "Show minimum", None))
self.action_dictionary.setText(_translate("MainWindow", "Dictionary", None))
self.action_save.setText(_translate("MainWindow", "Save", None))
self.action_close.setText(_translate("MainWindow", "Close", None))
self.action_close.setShortcut(_translate("MainWindow", "Ctrl+W", None))
self.action_alphabet.setText(_translate("MainWindow", "Alphabet", None))
self.actionB.setText(_translate("MainWindow", "b", None))
self.action_volume.setText(_translate("MainWindow", "Volume", None))
self.action_settings.setText(_translate("MainWindow", "Settings", None))
self.action_space_bar.setText(_translate("MainWindow", "Space bar", None))
self.action_port.setText(_translate("MainWindow", "Port 20320", None))
self.action_about_ticker.setText(_translate("MainWindow", "About Ticker", None))
self.action_clear.setText(_translate("MainWindow", "Clear", None))
self.action_calibrate.setText(_translate("MainWindow", "Calibrate (\"yes_\")", None))
self.action.setText(_translate("MainWindow", "Tutorial", None))
self.action_tutorial.setText(_translate("MainWindow", "Tutorial", None))
        self.action_inc_phrases.setText(_translate("MainWindow", "Increment Phrases", None))
self.action_fast_mode.setText(_translate("MainWindow", "Fast Mode", None))
self.action_clear_2.setText(_translate("MainWindow", "Clear", None))
self.action_clear_sentence.setText(_translate("MainWindow", "Clear", None))
|
|
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Node the calls to the 'range' builtin.
This is a rather complex beast as it has many cases, is difficult to know if
it's sizable enough to compute, and there are complex cases, where the bad
result of it can be predicted still, and these are interesting for warnings.
"""
import math
from nuitka.optimizations import BuiltinOptimization
from nuitka.utils.Utils import python_version
from .NodeBases import ExpressionBuiltinNoArgBase, ExpressionChildrenHavingBase
class ExpressionBuiltinRange0(ExpressionBuiltinNoArgBase):
kind = "EXPRESSION_BUILTIN_RANGE0"
def __init__(self, source_ref):
ExpressionBuiltinNoArgBase.__init__(
self,
builtin_function = range,
source_ref = source_ref
)
def mayHaveSideEffects(self):
return False
def mayBeNone(self):
return False
class ExpressionBuiltinRangeBase(ExpressionChildrenHavingBase):
""" Base class for range nodes with 1/2/3 arguments. """
builtin_spec = BuiltinOptimization.builtin_range_spec
def __init__(self, values, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values = values,
source_ref = source_ref
)
def getTruthValue(self):
length = self.getIterationLength()
if length is None:
return None
else:
return length > 0
def mayHaveSideEffects(self):
for child in self.getVisitableNodes():
if child.mayHaveSideEffects():
return True
if child.getIntegerValue() is None:
return True
if python_version >= 270 and \
child.isExpressionConstantRef() and \
type(child.getConstant()) is float:
return True
return False
def computeBuiltinSpec(self, given_values):
assert self.builtin_spec is not None, self
if not self.builtin_spec.isCompileTimeComputable(given_values):
return self, None, None
from .NodeMakingHelpers import getComputationResult
return getComputationResult(
node = self,
computation = lambda : self.builtin_spec.simulateCall(
given_values
),
description = "Built-in call to '%s' computed." % (
self.builtin_spec.getName()
)
)
def computeExpressionIter1(self, iter_node, constraint_collection):
# TODO: Support Python3 range objects too.
if python_version >= 300:
return iter_node, None, None
iteration_length = self.getIterationLength()
if iteration_length is not None and iteration_length > 256:
result = ExpressionBuiltinXrange(
low = self.getLow(),
high = self.getHigh(),
step = self.getStep(),
source_ref = self.getSourceReference()
)
self.replaceWith(result)
return (
iter_node,
"new_expression",
"Replaced 'range' with 'xrange' built-in call."
)
return iter_node, None, None
@staticmethod
def getLow():
return None
@staticmethod
def getHigh():
return None
@staticmethod
def getStep():
return None
def mayBeNone(self):
return False
class ExpressionBuiltinRange1(ExpressionBuiltinRangeBase):
kind = "EXPRESSION_BUILTIN_RANGE1"
named_children = (
"low",
)
def __init__(self, low, source_ref):
assert low is not None
ExpressionBuiltinRangeBase.__init__(
self,
values = {
"low" : low,
},
source_ref = source_ref
)
getLow = ExpressionChildrenHavingBase.childGetter("low")
def computeExpression(self, constraint_collection):
# TODO: Support Python3 range objects too.
if python_version >= 300:
return self, None, None
low = self.getLow()
return self.computeBuiltinSpec(
given_values = (
low,
)
)
def getIterationLength(self):
low = self.getLow().getIntegerValue()
if low is None:
return None
return max(0, low)
def canPredictIterationValues(self):
return self.getIterationLength() is not None
def getIterationValue(self, element_index):
length = self.getIterationLength()
if length is None:
return None
if element_index > length:
return None
from .NodeMakingHelpers import makeConstantReplacementNode
# TODO: Make sure to cast element_index to what CPython will give, for
# now a downcast will do.
return makeConstantReplacementNode(
constant = int(element_index),
node = self
)
def isKnownToBeIterable(self, count):
return count is None or count == self.getIterationLength()
class ExpressionBuiltinRange2(ExpressionBuiltinRangeBase):
kind = "EXPRESSION_BUILTIN_RANGE2"
named_children = ("low", "high")
def __init__(self, low, high, source_ref):
ExpressionBuiltinRangeBase.__init__(
self,
values = {
"low" : low,
"high" : high
},
source_ref = source_ref
)
getLow = ExpressionChildrenHavingBase.childGetter("low")
getHigh = ExpressionChildrenHavingBase.childGetter("high")
builtin_spec = BuiltinOptimization.builtin_range_spec
def computeExpression(self, constraint_collection):
if python_version >= 300:
return self, None, None
low = self.getLow()
high = self.getHigh()
return self.computeBuiltinSpec(
given_values = (
low,
high
)
)
def getIterationLength(self):
low = self.getLow()
high = self.getHigh()
low = low.getIntegerValue()
if low is None:
return None
high = high.getIntegerValue()
if high is None:
return None
return max(0, high - low)
def canPredictIterationValues(self):
return self.getIterationLength() is not None
def getIterationValue(self, element_index):
low = self.getLow()
high = self.getHigh()
low = low.getIntegerValue()
if low is None:
return None
high = high.getIntegerValue()
if high is None:
return None
result = low + element_index
if result >= high:
return None
else:
from .NodeMakingHelpers import makeConstantReplacementNode
return makeConstantReplacementNode(
constant = result,
node = self
)
def isKnownToBeIterable(self, count):
return count is None or count == self.getIterationLength()
class ExpressionBuiltinRange3(ExpressionBuiltinRangeBase):
kind = "EXPRESSION_BUILTIN_RANGE3"
named_children = (
"low",
"high",
"step"
)
def __init__(self, low, high, step, source_ref):
ExpressionBuiltinRangeBase.__init__(
self,
values = {
"low" : low,
"high" : high,
"step" : step
},
source_ref = source_ref
)
getLow = ExpressionChildrenHavingBase.childGetter("low")
getHigh = ExpressionChildrenHavingBase.childGetter("high")
getStep = ExpressionChildrenHavingBase.childGetter("step")
builtin_spec = BuiltinOptimization.builtin_range_spec
def computeExpression(self, constraint_collection):
if python_version >= 300:
return self, None, None
low = self.getLow()
high = self.getHigh()
step = self.getStep()
return self.computeBuiltinSpec(
given_values = (
low,
high,
step
)
)
def getIterationLength(self):
low = self.getLow()
high = self.getHigh()
step = self.getStep()
low = low.getIntegerValue()
if low is None:
return None
high = high.getIntegerValue()
if high is None:
return None
step = step.getIntegerValue()
if step is None:
return None
# Give up on this, will raise ValueError.
if step == 0:
return None
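        # len(range(low, high, step)) == max(0, ceil((high - low) / step));
        # the sign cases below just avoid a negative estimate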
if low < high:
if step < 0:
estimate = 0
else:
estimate = math.ceil(float(high - low) / step)
else:
if step > 0:
estimate = 0
else:
estimate = math.ceil(float(high - low) / step)
estimate = round(estimate)
assert not estimate < 0
return int(estimate)
def canPredictIterationValues(self):
return self.getIterationLength() is not None
def getIterationValue(self, element_index):
low = self.getLow().getIntegerValue()
if low is None:
return None
high = self.getHigh().getIntegerValue()
if high is None:
return None
        step = self.getStep().getIntegerValue()
        if step is None:
            return None
        result = low + step * element_index
if result >= high:
return None
else:
from .NodeMakingHelpers import makeConstantReplacementNode
return makeConstantReplacementNode(
constant = result,
node = self
)
def isKnownToBeIterable(self, count):
return count is None or count == self.getIterationLength()
class ExpressionBuiltinXrange(ExpressionChildrenHavingBase):
kind = "EXPRESSION_BUILTIN_XRANGE"
named_children = ("low", "high", "step")
def __init__(self, low, high, step, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values = {
"low" : low,
"high" : high,
"step" : step
},
source_ref = source_ref
)
def computeExpression(self, constraint_collection):
return self, None, None
getLow = ExpressionChildrenHavingBase.childGetter("low")
getHigh = ExpressionChildrenHavingBase.childGetter("high")
getStep = ExpressionChildrenHavingBase.childGetter("step")
|
|
import operator
import logging
import itertools
import contextlib
import claripy
from .plugin import SimStatePlugin
from .. import sim_options
from ..state_plugins.sim_action import SimActionObject
l = logging.getLogger(name=__name__)
class SimStateHistory(SimStatePlugin):
"""
This class keeps track of historically-relevant information for paths.
"""
STRONGREF_STATE = True
def __init__(self, parent=None, clone=None):
SimStatePlugin.__init__(self)
# attributes handling the progeny of this history object
self.parent = parent if clone is None else clone.parent
self.merged_from = [ ] if clone is None else list(clone.merged_from)
self.merge_conditions = [ ] if clone is None else list(clone.merge_conditions)
self.depth = (0 if parent is None else parent.depth + 1) if clone is None else clone.depth
self.previous_block_count = (0 if parent is None else parent.block_count) if clone is None else \
clone.previous_block_count
# a string description of this history
self.recent_description = None if clone is None else clone.recent_description
# the control flow transfer information from this history onwards (to the current state)
self.jump_target = None if clone is None else clone.jump_target
self.jump_source = None if clone is None else clone.jump_source
self.jump_avoidable = None if clone is None else clone.jump_avoidable
self.jump_guard = None if clone is None else clone.jump_guard
self.jumpkind = None if clone is None else clone.jumpkind
# the execution log for this history
self.recent_events = [ ] if clone is None else list(clone.recent_events)
self.recent_bbl_addrs = [ ] if clone is None else list(clone.recent_bbl_addrs)
self.recent_ins_addrs = [ ] if clone is None else list(clone.recent_ins_addrs)
self.recent_stack_actions = [ ] if clone is None else list(clone.recent_stack_actions)
self.last_stmt_idx = None if clone is None else clone.last_stmt_idx
# numbers of blocks, syscalls, and instructions that were executed in this step
self.recent_block_count = 0 if clone is None else clone.recent_block_count
self.recent_syscall_count = 0 if clone is None else clone.recent_syscall_count
self.recent_instruction_count = -1 if clone is None else clone.recent_instruction_count
        # satisfiability tracking
self._all_constraints = ()
self._satisfiable = None
self.successor_ip = None if clone is None else clone.successor_ip
self.strongref_state = None if clone is None else clone.strongref_state
def init_state(self):
self.successor_ip = self.state._ip
def __getstate__(self):
# flatten ancestry, otherwise we hit recursion errors trying to get the entire history...
# the important intuition here is that if we provide the entire linked list to pickle, pickle
# will traverse it recursively. If we provide it as a real list, it will not do any recursion.
# the nuance is in whether the list we provide has live parent links, in which case it matters
# what order pickle iterates the list, as it will suddenly be able to perform memoization.
ancestry = []
parent = self.parent
self.parent = None
while parent is not None:
ancestry.append(parent)
parent = parent.parent
ancestry[-1].parent = None
rev_ancestry = list(reversed(ancestry))
d = super(SimStateHistory, self).__getstate__()
d['strongref_state'] = None
d['rev_ancestry'] = rev_ancestry
d['successor_ip'] = self.successor_ip
# reconstruct chain
child = self
for parent in ancestry:
child.parent = parent
child = parent
d.pop('parent')
return d
def __setstate__(self, d):
child = self
ancestry = list(reversed(d.pop('rev_ancestry')))
for parent in ancestry:
if hasattr(child, 'parent'):
break
child.parent = parent
child = parent
else:
child.parent = None
self.__dict__.update(d)
def __repr__(self):
addr = self.addr
if addr is None:
addr_str = "Unknown"
else:
addr_str = "%#x" % addr
return "<StateHistory @ %s>" % addr_str
def set_strongref_state(self, state):
if sim_options.EFFICIENT_STATE_MERGING in state.options:
self.strongref_state = state
@property
def addr(self):
if not self.recent_bbl_addrs:
return None
return self.recent_bbl_addrs[-1]
def merge(self, others, merge_conditions, common_ancestor=None):
if not others:
return False
self.merged_from.extend(h for h in others)
self.merge_conditions = merge_conditions
# we must fix this in order to get
# correct results when using constraints_since()
self.parent = common_ancestor if common_ancestor is not None else self.parent
        # flatten the recent events of all merged histories, dropping their
        # constraint actions (they are replaced by the combined constraint below)
        self.recent_events = [e for h in itertools.chain([self], others)
                              for e in h.recent_events
                              if not isinstance(e, SimActionConstraint)
                              ]
# rebuild recent constraints
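        # the merged state is feasible iff at least one incoming history's
        # constraints accumulated since the common ancestor hold, hence the
        # Or over the And of each history's recent constraints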
recent_constraints = [ h.constraints_since(common_ancestor) for h in itertools.chain([self], others) ]
combined_constraint = self.state.solver.Or(
*[ self.state.solver.simplify(self.state.solver.And(*history_constraints)) for history_constraints in recent_constraints ]
)
self.recent_events.append(SimActionConstraint(self.state, combined_constraint))
# hard to say what we should do with these others list of things...
#self.recent_bbl_addrs = [e.recent_bbl_addrs for e in itertools.chain([self], others)]
#self.recent_ins_addrs = [e.recent_ins_addrs for e in itertools.chain([self], others)]
#self.recent_stack_actions = [e.recent_stack_actions for e in itertools.chain([self], others)]
return True
def widen(self, others): # pylint: disable=unused-argument
l.warning("history widening is not implemented!")
return # TODO
@SimStatePlugin.memo
def copy(self, memo): # pylint: disable=unused-argument
return SimStateHistory(clone=self)
def trim(self):
"""
Discard the ancestry of this state.
"""
new_hist = self.copy({})
new_hist.parent = None
self.state.register_plugin('history', new_hist)
def filter_actions(self, block_addr=None, block_stmt=None, insn_addr=None, read_from=None, write_to=None):
"""
Filter self.actions based on some common parameters.
:param block_addr: Only return actions generated in blocks starting at this address.
:param block_stmt: Only return actions generated in the nth statement of each block.
:param insn_addr: Only return actions generated in the assembly instruction at this address.
:param read_from: Only return actions that perform a read from the specified location.
:param write_to: Only return actions that perform a write to the specified location.
Notes:
If IR optimization is turned on, reads and writes may not occur in the instruction
they originally came from. Most commonly, If a register is read from twice in the same
block, the second read will not happen, instead reusing the temp the value is already
stored in.
Valid values for read_from and write_to are the string literals 'reg' or 'mem' (matching
any read or write to registers or memory, respectively), any string (representing a read
or write to the named register), and any integer (representing a read or write to the
memory at this address).
"""
if read_from is not None:
if write_to is not None:
raise ValueError("Can't handle read_from and write_to at the same time!")
if read_from in ('reg', 'mem'):
read_type = read_from
read_offset = None
elif isinstance(read_from, str):
read_type = 'reg'
read_offset = self.state.project.arch.registers[read_from][0]
else:
read_type = 'mem'
read_offset = read_from
if write_to is not None:
if write_to in ('reg', 'mem'):
write_type = write_to
write_offset = None
elif isinstance(write_to, str):
write_type = 'reg'
write_offset = self.state.project.arch.registers[write_to][0]
else:
write_type = 'mem'
write_offset = write_to
#def addr_of_stmt(bbl_addr, stmt_idx):
# if stmt_idx is None:
# return None
# stmts = self.state.project.factory.block(bbl_addr).vex.statements
# if stmt_idx >= len(stmts):
# return None
# for i in reversed(range(stmt_idx + 1)):
# if stmts[i].tag == 'Ist_IMark':
# return stmts[i].addr + stmts[i].delta
# return None
def action_reads(action):
if action.type != read_type:
return False
if action.action != 'read':
return False
if read_offset is None:
return True
addr = action.addr
if isinstance(addr, SimActionObject):
addr = addr.ast
if isinstance(addr, claripy.ast.Base):
if addr.symbolic:
return False
addr = self.state.solver.eval(addr)
if addr != read_offset:
return False
return True
def action_writes(action):
if action.type != write_type:
return False
if action.action != 'write':
return False
if write_offset is None:
return True
addr = action.addr
if isinstance(addr, SimActionObject):
addr = addr.ast
if isinstance(addr, claripy.ast.Base):
if addr.symbolic:
return False
addr = self.state.solver.eval(addr)
if addr != write_offset:
return False
return True
return [x for x in reversed(self.actions) if
(block_addr is None or x.bbl_addr == block_addr) and
(block_stmt is None or x.stmt_idx == block_stmt) and
(read_from is None or action_reads(x)) and
(write_to is None or action_writes(x)) and
(insn_addr is None or (x.sim_procedure is None and x.ins_addr == insn_addr))
#(insn_addr is None or (x.sim_procedure is None and addr_of_stmt(x.bbl_addr, x.stmt_idx) == insn_addr))
]
#def _record_state(self, state, strong_reference=True):
# else:
# # state.scratch.bbl_addr may not be initialized as final states from the "flat_successors" list. We need to get
# # the value from _target in that case.
# if self.addr is None and not self._target.symbolic:
# self._addrs = [ self._target._model_concrete.value ]
# else:
# # FIXME: redesign so this does not happen
# l.warning("Encountered a path to a SimProcedure with a symbolic target address.")
#
# if o.UNICORN in state.options:
# self.extra_length += state.scratch.executed_block_count - 1
#
# if o.TRACK_ACTION_HISTORY in state.options:
# self._events = state.history.events
#
# # record constraints, added constraints, and satisfiability
# self._all_constraints = state.solver.constraints
# self._fresh_constraints = state.history.fresh_constraints
#
# if isinstance(state.solver._solver, claripy.frontend_mixins.SatCacheMixin):
# self._satisfiable = state.solver._solver._cached_satness
# else:
# self._satisfiable = None
#
# # record the state as a weak reference
# self._state_weak_ref = weakref.ref(state)
#
# # and as a strong ref
# if strong_reference:
# self._state_strong_ref = state
def demote(self):
"""
Demotes this history node, causing it to drop the strong state reference.
"""
self.strongref_state = None
def reachable(self):
if self._satisfiable is not None:
pass
elif self.state is not None:
self._satisfiable = self.state.solver.satisfiable()
else:
solver = claripy.Solver()
solver.add(self._all_constraints)
self._satisfiable = solver.satisfiable()
return self._satisfiable
#
# Log handling
#
def add_event(self, event_type, **kwargs):
new_event = SimEvent(self.state, event_type, **kwargs)
self.recent_events.append(new_event)
def add_action(self, action):
self.recent_events.append(action)
def extend_actions(self, new_actions):
self.recent_events.extend(new_actions)
@contextlib.contextmanager
def subscribe_actions(self):
start_idx = len(self.recent_actions)
res = []
yield res
res.extend(self.recent_actions[start_idx:])
#
# Convenient accessors
#
@property
def recent_constraints(self):
# this and the below MUST be lists, not generators, because we need to reverse them
return [ ev.constraint for ev in self.recent_events if isinstance(ev, SimActionConstraint) ]
@property
def recent_actions(self):
return [ ev for ev in self.recent_events if isinstance(ev, SimAction) ]
@property
def block_count(self):
return self.previous_block_count + self.recent_block_count
@property
def lineage(self):
return HistoryIter(self)
@property
def parents(self):
if self.parent:
for p in self.parent.lineage:
yield p
@property
def events(self):
return LambdaIterIter(self, operator.attrgetter('recent_events'))
@property
def actions(self):
return LambdaIterIter(self, operator.attrgetter('recent_actions'))
@property
def jumpkinds(self):
return LambdaAttrIter(self, operator.attrgetter('jumpkind'))
@property
def jump_guards(self):
return LambdaAttrIter(self, operator.attrgetter('jump_guard'))
@property
def jump_targets(self):
return LambdaAttrIter(self, operator.attrgetter('jump_target'))
@property
def descriptions(self):
return LambdaAttrIter(self, operator.attrgetter('recent_description'))
@property
def bbl_addrs(self):
return LambdaIterIter(self, operator.attrgetter('recent_bbl_addrs'))
@property
def ins_addrs(self):
return LambdaIterIter(self, operator.attrgetter('recent_ins_addrs'))
@property
def stack_actions(self):
return LambdaIterIter(self, operator.attrgetter('recent_stack_actions'))
#
# Merging support
#
def closest_common_ancestor(self, other):
"""
Find the common ancestor between this history node and 'other'.
:param other: the PathHistory to find a common ancestor with.
:return: the common ancestor SimStateHistory, or None if there isn't one
"""
our_history_iter = reversed(HistoryIter(self))
their_history_iter = reversed(HistoryIter(other))
sofar = set()
while True:
our_done = False
their_done = False
try:
our_next = next(our_history_iter)
if our_next in sofar:
# we found it!
return our_next
sofar.add(our_next)
except StopIteration:
# we ran out of items during iteration
our_done = True
try:
their_next = next(their_history_iter)
if their_next in sofar:
# we found it!
return their_next
sofar.add(their_next)
except StopIteration:
# we ran out of items during iteration
their_done = True
# if we ran out of both lists, there's no common ancestor
if our_done and their_done:
return None
def constraints_since(self, other):
"""
Returns the constraints that have been accumulated since `other`.
:param other: a prior PathHistory object
:returns: a list of constraints
"""
constraints = [ ]
cur = self
while cur is not other and cur is not None:
constraints.extend(cur.recent_constraints)
cur = cur.parent
return constraints
def make_child(self):
return SimStateHistory(parent=self)
class TreeIter(object):
def __init__(self, start, end=None):
self._start = start
self._end = end
def _iter_nodes(self):
n = self._start
while n is not self._end:
yield n
n = n.parent
def __iter__(self):
for i in self.hardcopy:
yield i
def __reversed__(self):
raise NotImplementedError("Why are you using this class")
@property
def hardcopy(self):
        # materialize the reverse-order iterator, then flip it back to oldest-to-newest
return list(reversed(tuple(reversed(self))))
def __len__(self):
# TODO: this is wrong
return self._start.depth
def __getitem__(self, k):
if isinstance(k, slice):
raise ValueError("Please use .hardcopy to use slices")
if k >= 0:
raise ValueError("Please use .hardcopy to use nonnegative indexes")
i = 0
for item in reversed(self):
i -= 1
if i == k:
return item
raise IndexError(k)
def count(self, v):
"""
Count occurrences of value v in the entire history. Note that the subclass must implement the __reversed__
method, otherwise an exception will be thrown.
:param object v: The value to look for
:return: The number of occurrences
:rtype: int
"""
ctr = 0
for item in reversed(self):
if item == v:
ctr += 1
return ctr
class HistoryIter(TreeIter):
def __reversed__(self):
for hist in self._iter_nodes():
yield hist
class LambdaAttrIter(TreeIter):
def __init__(self, start, f, **kwargs):
TreeIter.__init__(self, start, **kwargs)
self._f = f
def __reversed__(self):
for hist in self._iter_nodes():
a = self._f(hist)
if a is not None:
yield a
class LambdaIterIter(LambdaAttrIter):
def __init__(self, start, f, reverse=True, **kwargs):
LambdaAttrIter.__init__(self, start, f, **kwargs)
self._f = f
self._reverse = reverse
def __reversed__(self):
for hist in self._iter_nodes():
for a in reversed(self._f(hist)) if self._reverse else self._f(hist):
yield a
from angr.sim_state import SimState
SimState.register_default('history', SimStateHistory)
from .sim_action import SimAction, SimActionConstraint
from .sim_event import SimEvent
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
from mxnet.test_utils import *
import numpy as np
import os, gzip
import pickle as pickle
import time
try:
import h5py
except ImportError:
h5py = None
import sys
from common import assertRaises
import unittest
def test_MNISTIter():
# prepare data
get_mnist_ubyte()
batch_size = 100
train_dataiter = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
data_shape=(784,),
batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10)
# test_loop
nbatch = 60000 / batch_size
batch_count = 0
for batch in train_dataiter:
batch_count += 1
assert(nbatch == batch_count)
# test_reset
train_dataiter.reset()
train_dataiter.iter_next()
label_0 = train_dataiter.getlabel().asnumpy().flatten()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.reset()
train_dataiter.iter_next()
label_1 = train_dataiter.getlabel().asnumpy().flatten()
assert(sum(label_0 - label_1) == 0)
def test_Cifar10Rec():
get_cifar10()
dataiter = mx.io.ImageRecordIter(
path_imgrec="data/cifar/train.rec",
mean_img="data/cifar/cifar10_mean.bin",
rand_crop=False,
            rand_mirror=False,
shuffle=False,
data_shape=(3,28,28),
batch_size=100,
preprocess_threads=4,
prefetch_buffer=1)
labelcount = [0 for i in range(10)]
batchcount = 0
for batch in dataiter:
npdata = batch.data[0].asnumpy().flatten().sum()
sys.stdout.flush()
batchcount += 1
nplabel = batch.label[0].asnumpy()
for i in range(nplabel.shape[0]):
labelcount[int(nplabel[i])] += 1
for i in range(10):
assert(labelcount[i] == 5000)
def test_NDArrayIter():
data = np.ones([1000, 2, 2])
label = np.ones([1000, 1])
for i in range(1000):
data[i] = i / 100
label[i] = i / 100
dataiter = mx.io.NDArrayIter(data, label, 128, True, last_batch_handle='pad')
batchidx = 0
for batch in dataiter:
batchidx += 1
assert(batchidx == 8)
dataiter = mx.io.NDArrayIter(data, label, 128, False, last_batch_handle='pad')
batchidx = 0
labelcount = [0 for i in range(10)]
for batch in dataiter:
label = batch.label[0].asnumpy().flatten()
assert((batch.data[0].asnumpy()[:,0,0] == label).all())
for i in range(label.shape[0]):
labelcount[int(label[i])] += 1
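    # with last_batch_handle='pad', 8 batches of 128 cover 1024 entries; the 24
    # padded entries wrap around to the start of the data (label 0), so class 0
    # is counted 100 + 24 = 124 times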
for i in range(10):
if i == 0:
assert(labelcount[i] == 124)
else:
assert(labelcount[i] == 100)
def test_NDArrayIter_h5py():
if not h5py:
return
data = np.ones([1000, 2, 2])
label = np.ones([1000, 1])
for i in range(1000):
data[i] = i / 100
label[i] = i / 100
try:
os.remove("ndarraytest.h5")
except OSError:
pass
    with h5py.File("ndarraytest.h5", "w") as f:
f.create_dataset("data", data=data)
f.create_dataset("label", data=label)
dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, True, last_batch_handle='pad')
batchidx = 0
for batch in dataiter:
batchidx += 1
assert(batchidx == 8)
dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, False, last_batch_handle='pad')
labelcount = [0 for i in range(10)]
for batch in dataiter:
label = batch.label[0].asnumpy().flatten()
assert((batch.data[0].asnumpy()[:,0,0] == label).all())
for i in range(label.shape[0]):
labelcount[int(label[i])] += 1
try:
os.remove("ndarraytest.h5")
except OSError:
pass
for i in range(10):
if i == 0:
assert(labelcount[i] == 124)
else:
assert(labelcount[i] == 100)
def test_NDArrayIter_csr():
# creating toy data
num_rows = rnd.randint(5, 15)
num_cols = rnd.randint(1, 20)
batch_size = rnd.randint(1, num_rows)
shape = (num_rows, num_cols)
csr, _ = rand_sparse_ndarray(shape, 'csr')
dns = csr.asnumpy()
# CSRNDArray or scipy.sparse.csr_matrix with last_batch_handle not equal to 'discard' will throw NotImplementedError
assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': csr}, dns, batch_size)
try:
import scipy.sparse as spsp
train_data = spsp.csr_matrix(dns)
assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': train_data}, dns, batch_size)
except ImportError:
pass
# CSRNDArray with shuffle
csr_iter = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, dns, batch_size,
shuffle=True, last_batch_handle='discard'))
num_batch = 0
for batch in csr_iter:
num_batch += 1
assert(num_batch == num_rows // batch_size)
# make iterators
csr_iter = iter(mx.io.NDArrayIter(csr, csr, batch_size, last_batch_handle='discard'))
begin = 0
for batch in csr_iter:
expected = np.zeros((batch_size, num_cols))
end = begin + batch_size
expected[:num_rows - begin] = dns[begin:end]
if end > num_rows:
expected[num_rows - begin:] = dns[0:end - num_rows]
assert_almost_equal(batch.data[0].asnumpy(), expected)
begin += batch_size
def test_LibSVMIter():
def check_libSVMIter_synthetic():
cwd = os.getcwd()
data_path = os.path.join(cwd, 'data.t')
label_path = os.path.join(cwd, 'label.t')
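        # LibSVM text format: each line is "<label> <index>:<value> ...";
        # feature indices that are absent are implicit zeros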
with open(data_path, 'w') as fout:
fout.write('1.0 0:0.5 2:1.2\n')
fout.write('-2.0\n')
fout.write('-3.0 0:0.6 1:2.4 2:1.2\n')
fout.write('4 2:-1.2\n')
with open(label_path, 'w') as fout:
fout.write('1.0\n')
fout.write('-2.0 0:0.125\n')
fout.write('-3.0 2:1.2\n')
fout.write('4 1:1.0 2:-1.2\n')
data_dir = os.path.join(cwd, 'data')
data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
data_shape=(3, ), label_shape=(3, ), batch_size=3)
first = mx.nd.array([[ 0.5, 0., 1.2], [ 0., 0., 0.], [ 0.6, 2.4, 1.2]])
second = mx.nd.array([[ 0., 0., -1.2], [ 0.5, 0., 1.2], [ 0., 0., 0.]])
i = 0
for batch in iter(data_train):
expected = first.asnumpy() if i == 0 else second.asnumpy()
assert_almost_equal(data_train.getdata().asnumpy(), expected)
i += 1
def check_libSVMIter_news_data():
news_metadata = {
'name': 'news20.t',
'origin_name': 'news20.t.bz2',
'url': "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/news20.t.bz2",
'feature_dim': 62060,
'num_classes': 20,
'num_examples': 3993,
}
batch_size = 33
num_examples = news_metadata['num_examples']
data_dir = os.path.join(os.getcwd(), 'data')
get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'],
news_metadata['origin_name'])
path = os.path.join(data_dir, news_metadata['name'])
data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),
batch_size=batch_size)
for epoch in range(2):
num_batches = 0
for batch in data_train:
# check the range of labels
assert(np.sum(batch.label[0].asnumpy() > 20) == 0)
assert(np.sum(batch.label[0].asnumpy() <= 0) == 0)
num_batches += 1
expected_num_batches = num_examples / batch_size
assert(num_batches == int(expected_num_batches)), num_batches
data_train.reset()
check_libSVMIter_synthetic()
check_libSVMIter_news_data()
def test_DataBatch():
from nose.tools import ok_
from mxnet.io import DataBatch
import re
batch = DataBatch(data=[mx.nd.ones((2,3))])
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch)))
batch = DataBatch(data=[mx.nd.ones((2,3)), mx.nd.ones((7,8))], label=[mx.nd.ones((4,5))])
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch)))
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7826")
def test_CSVIter():
def check_CSVIter_synthetic():
cwd = os.getcwd()
data_path = os.path.join(cwd, 'data.t')
label_path = os.path.join(cwd, 'label.t')
with open(data_path, 'w') as fout:
for i in range(1000):
fout.write(','.join(['1' for _ in range(8*8)]) + '\n')
with open(label_path, 'w') as fout:
for i in range(1000):
fout.write('0\n')
data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8,8),
label_csv=label_path, batch_size=100)
expected = mx.nd.ones((100, 8, 8))
for batch in iter(data_train):
assert_almost_equal(data_train.getdata().asnumpy(), expected.asnumpy())
check_CSVIter_synthetic()
if __name__ == "__main__":
test_NDArrayIter()
if h5py:
test_NDArrayIter_h5py()
test_MNISTIter()
test_Cifar10Rec()
test_LibSVMIter()
test_NDArrayIter_csr()
test_CSVIter()
|
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.test.utils import override_settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from ..models import Article
from bogofilter.models import BogofilterComment
from bogofilter.moderation import BogofilterCommentModerator
from django_comments.moderation import moderator
from bogofilter.forms import BogofilterCommentForm
import shutil
import os
from . import CommentTestCase
CT = ContentType.objects.get_for_model
class BogofilterTests(CommentTestCase):
def CommentFormData(self, data):
f = BogofilterCommentForm(self.article)
d = data.copy()
d.update(f.initial)
return d
def setUp(self):
super(BogofilterTests, self).setUp()
self.article = Article.objects.get(pk=1)
# register our moderator
if Article not in moderator._registry:
moderator.register(Article, BogofilterCommentModerator)
# training spam
self.c1 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Tirage du tarot de marseille gratuit en ligne horoscope homme balance 2011",
user_name = "voyance gratuite",
user_email = "[email protected]",
user_url = "http://www.alainfauquet.fr/",
ip_address = "85.254.74.3",
)
self.c1.mark_spam()
self.c2 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Tirage du tarot de marseille gratuit en ligne horoscope homme balance 2011",
user_name = "voyance gratuite en ligne",
user_email = "[email protected]",
user_url = "http://www.annonces-flash.com/",
ip_address = "85.254.75.75",
)
self.c2.mark_spam()
self.c3 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Voyance sante gratuite tirag tarot gratuit",
user_name = "voyance gratuite",
user_email = "[email protected]",
user_url = "http://www.annonces-flash.com/",
ip_address = "85.254.81.91",
)
self.c3.mark_spam()
self.c4 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Voyance pendule gratuite horoscope christine haas 2011",
user_name = "voyance",
user_email = "[email protected]",
user_url = "http://www.alainfauquet.fr/",
ip_address = "85.254.81.202",
)
self.c4.mark_spam()
# training ham
self.c7 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Great post. Saved me a ton of time. Thanks.",
user_name = "Reg Doe",
user_email = "[email protected]",
user_url = "http://www.regdoe.ca/",
ip_address = "95.47.208.0",
)
self.c7.mark_ham()
self.c8 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "This is just awesome! Thanks for the defs.",
user_name = "Shivi",
user_email = "[email protected]",
user_url = "",
ip_address = "121.160.222.97",
)
self.c8.mark_ham()
self.c9 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Worked for me thanks",
user_name = "raga",
user_email = "[email protected]",
user_url = "",
ip_address = "140.240.54.144",
)
self.c9.mark_ham()
self.c10 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Nice post. Very concise and useful. I like your approach of attaching custom attributes to the QPushButton. I tend to do this sort of thing using lambda. I'm not sure if one way is better than the other, just an observation: button.clicked.connect(lambda: self.listen(name, url)) http://codrspace.com/durden/using-lambda-with-pyqt-signals/ I've also seen people solve this problem with functools.partial: http://eli.thegreenplace.net/2011/04/25/passing-extra-arguments-to-pyqt-slot/",
user_name = "Luki Lii",
user_email = "[email protected]",
user_url = "http://lukilii.me/",
ip_address = "137.31.79.10",
)
self.c10.mark_ham()
self.c11 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Yes, using the lambda is a valid alternative but self.sender() no longer works inside self.listen() - or inside the lambda for that matter.",
user_name = "Stefan Talpalaru",
user_email = "[email protected]",
user_url = "",
ip_address = "78.23.54.6",
)
self.c11.mark_ham()
self.c12 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Neat article. For future reference: http://blog.scoopz.com/2011/05/05/listen-to-any-bbc-radio-live-stream-using-vlc-including-radio-1-and-1xtra/",
user_name = "carlisle",
user_email = "[email protected]",
user_url = "",
ip_address = "177.208.61.148",
)
self.c12.mark_ham()
self.c13 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Thank you!",
user_name = "Gordon",
user_email = "[email protected]",
user_url = "http://cafeofbrokenarms.com/",
ip_address = "130.106.173.235",
)
self.c13.mark_ham()
self.c14 = BogofilterComment.objects.create(
content_type = CT(Article),
object_pk = "1",
site = Site.objects.get_current(),
comment = "Thank you for this great bit of code. It has worked for the two websites I have used this in. Your article is very informative and written with a very fun tone. Thanks!",
user_name = "Rachel",
user_email = "[email protected]",
user_url = "",
ip_address = "172.13.9.220",
)
self.c14.mark_ham()
# test spam
self.client.post("/post/",
self.CommentFormData({
"comment" : "Horoscope yahoo du jour l ascendant astrologique",
"name" : "voyance",
"email" : "[email protected]",
"url" : "http://www.annonces-flash.com/",
}),
REMOTE_ADDR="85.254.82.164")
self.c5 = BogofilterComment.objects.order_by('-submit_date')[0]
self.client.post("/post/",
self.CommentFormData({
"comment" : "Accouchement lune calcul horoscope femme scorpion",
"name" : "voyance gratuite en ligne",
"email" : "[email protected]",
"url" : "http://www.annonces-flash.com/",
}),
REMOTE_ADDR="85.254.153.161")
self.c6 = BogofilterComment.objects.order_by('-submit_date')[0]
# test ham
self.client.post("/post/",
self.CommentFormData({
"comment" : "Hi, thanks for the blog. Can this project still work with facebook oauth 2.0 and kay framework 3? because I see the project is pretty old (3years). I try to use Kay ext for facebook/twitter but it does not work. Therefore, I hope your project can help me.",
"name" : "Nam",
"email" : "[email protected]",
"url" : "",
}),
REMOTE_ADDR="48.131.92.5")
self.c28 = BogofilterComment.objects.order_by('-submit_date')[0]
self.client.post("/post/",
self.CommentFormData({
"comment" : "This post was very useful. Having my logs in UTC will be excellent.",
"name" : "Tim Wilder",
"email" : "[email protected]",
"url" : "",
}),
REMOTE_ADDR="207.145.42.4")
self.c29 = BogofilterComment.objects.order_by('-submit_date')[0]
self.client.post("/post/",
self.CommentFormData({
"comment" : "One other thing: since the lambda's body is not evalued until the signal is triggered, a naive implementation would make all the buttons play the last radio station (because 'name' and 'url' point to it at the end of the loop). The only way it works is something like this: button.clicked.connect(lambda _button=button, _name=name, _url=url: self.listen(_button, _name, _url)) Yes, it's ugly...",
"name" : "Stefan Talpalaru",
"email" : "[email protected]",
"url" : "http://stefantalpalaru.wordpress.com",
}),
REMOTE_ADDR="86.8.17.57")
self.c30 = BogofilterComment.objects.order_by('-submit_date')[0]
def tearDown(self):
super(BogofilterTests, self).tearDown()
bogofilter_dir = settings.BOGOFILTER_ARGS[1]
if os.path.isdir(bogofilter_dir):
shutil.rmtree(bogofilter_dir)
# unregister our moderator
if Article in moderator._registry:
moderator.unregister(Article)
def testBogo(self):
# test the spam
self.assertEqual(self.c5.bogotype()[0], 'S')
self.assertEqual(self.c5.is_public, False)
self.assertEqual(self.c6.bogotype()[0], 'S')
self.assertEqual(self.c6.is_public, False)
# test the ham
self.assertNotEqual(self.c28.bogotype()[0], 'S')
self.assertEqual(self.c28.is_public, True)
self.assertNotEqual(self.c29.bogotype()[0], 'S')
self.assertEqual(self.c29.is_public, True)
self.assertNotEqual(self.c30.bogotype()[0], 'S')
self.assertEqual(self.c30.is_public, True)
|
|
# tempfile.py unit tests.
import tempfile
import errno
import io
import os
import signal
import shutil
import sys
import re
import warnings
import contextlib
import unittest
from test import test_support as support
warnings.filterwarnings("ignore",
category=RuntimeWarning,
message="mktemp", module=__name__)
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def failOnException(self, what, ei=None):
if ei is None:
ei = sys.exc_info()
self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
test_classes = []
class test_exports(TC):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
def test_get_six_char_str(self):
# _RandomNameSequence returns a six-character string
s = self.r.next()
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in xrange(TEST_FILES):
s = r.next()
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def test_supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
try:
for s in r:
i += 1
if i == 20:
break
except:
self.failOnException("iteration")
@unittest.skipUnless(hasattr(os, 'fork'),
"os.fork is required for this test")
def test_process_awareness(self):
# ensure that the random source differs between
# child and parent.
read_fd, write_fd = os.pipe()
pid = None
try:
pid = os.fork()
if not pid:
os.close(read_fd)
os.write(write_fd, next(self.r).encode("ascii"))
os.close(write_fd)
                # bypass the normal exit handlers - leave those to
# the parent.
os._exit(0)
parent_value = next(self.r)
child_value = os.read(read_fd, len(parent_value)).decode("ascii")
finally:
if pid:
# best effort to ensure the process can't bleed out
# via any bugs above
try:
os.kill(pid, signal.SIGKILL)
except EnvironmentError:
pass
os.close(read_fd)
os.close(write_fd)
self.assertNotEqual(child_value, parent_value)
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
"""Test the internal function _candidate_tempdir_list."""
def test_nonempty_list(self):
# _candidate_tempdir_list returns a nonempty list of strings
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, basestring)
def test_wanted_dirs(self):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertIn(dirname, cand)
# Not practical to try to verify the presence of OS-specific
# paths in this list.
test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir some more by testing gettempdir.
class TestGetDefaultTempdir(TC):
"""Test _get_default_tempdir()."""
def test_no_files_left_behind(self):
# use a private empty directory
our_temp_directory = tempfile.mkdtemp()
try:
# force _get_default_tempdir() to consider our empty directory
def our_candidate_list():
return [our_temp_directory]
with support.swap_attr(tempfile, "_candidate_tempdir_list",
our_candidate_list):
# verify our directory is empty after _get_default_tempdir()
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
def raise_OSError(*args, **kwargs):
raise OSError(-1)
with support.swap_attr(io, "open", raise_OSError):
# test again with failing io.open()
with self.assertRaises(IOError) as cm:
tempfile._get_default_tempdir()
self.assertEqual(cm.exception.errno, errno.ENOENT)
self.assertEqual(os.listdir(our_temp_directory), [])
open = io.open
def bad_writer(*args, **kwargs):
fp = open(*args, **kwargs)
fp.write = raise_OSError
return fp
with support.swap_attr(io, "open", bad_writer):
# test again with failing write()
with self.assertRaises(IOError) as cm:
tempfile._get_default_tempdir()
self.assertEqual(cm.exception.errno, errno.ENOENT)
self.assertEqual(os.listdir(our_temp_directory), [])
finally:
shutil.rmtree(our_temp_directory)
test_classes.append(TestGetDefaultTempdir)
class test__get_candidate_names(TC):
"""Test the internal function _get_candidate_names."""
def test_retval(self):
# _get_candidate_names returns a _RandomNameSequence object
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
# _get_candidate_names always returns the same object
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
test_classes.append(test__get_candidate_names)
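# Helpers for the collision tests below: run a block inside a fresh, private
# temporary directory, and force tempfile's candidate-name generator to yield
# a fixed sequence of names so that name collisions can be provoked on demand.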
@contextlib.contextmanager
def _inside_empty_temp_dir():
dir = tempfile.mkdtemp()
try:
with support.swap_attr(tempfile, 'tempdir', dir):
yield
finally:
support.rmtree(dir)
def _mock_candidate_names(*names):
return support.swap_attr(tempfile,
'_get_candidate_names',
lambda: iter(names))
class test__mkstemp_inner(TC):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
try:
file = self.mkstemped(dir, pre, suf, bin)
except:
self.failOnException("_mkstemp_inner")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write("blat")
self.do_create(pre="a").write("blat")
self.do_create(suf="b").write("blat")
self.do_create(pre="a", suf="b").write("blat")
self.do_create(pre="aa", suf=".txt").write("blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = range(TEST_FILES)
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write("blat")
support.gc_collect()
finally:
os.rmdir(dir)
@unittest.skipUnless(has_stat, 'os.stat not available')
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0600
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
@unittest.skipUnless(has_spawnl, 'os.spawnl not available')
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
@unittest.skipUnless(has_textmode, "text mode not available")
def test_textmode(self):
# _mkstemp_inner can create files in text mode
self.do_create(bin=0).write("blat\n")
# XXX should test that the file really is a text file
def default_mkstemp_inner(self):
return tempfile._mkstemp_inner(tempfile.gettempdir(),
tempfile.template,
'',
tempfile._bin_openflags)
def test_collision_with_existing_file(self):
# _mkstemp_inner tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
(fd1, name1) = self.default_mkstemp_inner()
os.close(fd1)
self.assertTrue(name1.endswith('aaa'))
(fd2, name2) = self.default_mkstemp_inner()
os.close(fd2)
self.assertTrue(name2.endswith('bbb'))
def test_collision_with_existing_directory(self):
# _mkstemp_inner tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('aaa'))
(fd, name) = self.default_mkstemp_inner()
os.close(fd)
self.assertTrue(name.endswith('bbb'))
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, basestring)
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
try:
fd = os.open(p, os.O_RDWR | os.O_CREAT)
except:
self.failOnException("os.open")
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
try:
file = tempfile.NamedTemporaryFile()
file.write("blat")
file.close()
except:
self.failOnException("create file in %s" % tempfile.gettempdir())
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
"""Test mkstemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
except:
self.failOnException("mkstemp")
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
"""Test mkdtemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("mkdtemp")
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = range(TEST_FILES)
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, basestring)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
@unittest.skipUnless(has_stat, 'os.stat not available')
def test_mode(self):
# mkdtemp creates directories with the proper mode
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0777 # Mask off sticky bits inherited from /tmp
expected = 0700
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
def test_collision_with_existing_file(self):
# mkdtemp tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
file = tempfile.NamedTemporaryFile(delete=False)
file.close()
self.assertTrue(file.name.endswith('aaa'))
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('bbb'))
def test_collision_with_existing_directory(self):
# mkdtemp tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir1 = tempfile.mkdtemp()
self.assertTrue(dir1.endswith('aaa'))
dir2 = tempfile.mkdtemp()
self.assertTrue(dir2.endswith('bbb'))
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
# mysteriously appeared in the meanwhile.
os.close(os.open(self.name, self._bflags, 0600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
try:
file = self.mktemped(self.dir, pre, suf)
except:
self.failOnException("mktemp")
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
support.gc_collect()
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = range(TEST_FILES)
for i in extant:
extant[i] = self.do_create(pre="aa")
del extant
support.gc_collect()
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
except:
self.failOnException("NamedTemporaryFile")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write('blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write('blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write('abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_no_leak_fd(self):
# Issue #21058: don't leak file descriptor when fdopen() fails
old_close = os.close
old_fdopen = os.fdopen
closed = []
def close(fd):
closed.append(fd)
def fdopen(*args):
raise ValueError()
os.close = close
os.fdopen = fdopen
try:
self.assertRaises(ValueError, tempfile.NamedTemporaryFile)
self.assertEqual(len(closed), 1)
finally:
os.close = old_close
os.fdopen = old_fdopen
# How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("SpooledTemporaryFile")
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write('blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple times within the max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write('x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write('x' * 20)
self.assertFalse(f._rolled)
f.write('x' * 10)
self.assertFalse(f._rolled)
f.write('x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_xreadlines(self):
f = self.do_create(max_size=20)
f.write(b'abc\n' * 5)
f.seek(0)
self.assertFalse(f._rolled)
self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5)
f.write(b'x\ny')
self.assertTrue(f._rolled)
f.seek(0)
self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5 + [b'x\n', b'y'])
def test_sparse(self):
        # A SpooledTemporaryFile only rolls over once data is actually written
        # past max_size; merely seeking past max_size does not trigger it
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write('x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write('abc\n')
self.assertFalse(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write('abc\n')
self.assertTrue(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write("a" * 35)
write("b" * 35)
seek(0, 0)
self.assertTrue(read(70) == 'a'*35 + 'b'*35)
def test_properties(self):
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+b')
self.assertIsNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
f.write(b'x')
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'w+b')
self.assertIsNotNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write('abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write('abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
try:
tempfile.TemporaryFile()
except:
self.failOnException("TemporaryFile")
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write('blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
ei = sys.exc_info()
# cleanup
f.close()
os.rmdir(dir)
self.failOnException("rmdir", ei)
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write('abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
# How to test the mode and bufsize parameters?
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
test_classes.append(test_TemporaryFile)
def test_main():
support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
|
import re
from inspect import getargspec
from functools import wraps
from django.utils.datastructures import SortedDict
from codenode.external.jsonrpc.site import jsonrpc_site
from codenode.external.jsonrpc.types import *
from codenode.external.jsonrpc.exceptions import *
default_site = jsonrpc_site
KWARG_RE = re.compile(
r'\s*(?P<arg_name>[a-zA-Z0-9_]+)\s*=\s*(?P<arg_type>[a-zA-Z]+)\s*$')
SIG_RE = re.compile(
r'\s*(?P<method_name>[a-zA-Z0-9._]+)\s*(\((?P<args_sig>[^)].*)?\)'
r'\s*(\->\s*(?P<return_sig>.*))?)?\s*$')
class JSONRPCTypeCheckingUnavailable(Exception): pass
def _type_checking_available(sig='', validate=False):
if not hasattr(type, '__eq__') and validate: # and False:
raise JSONRPCTypeCheckingUnavailable(
'Type checking is not available in your version of Python '
'which is only available in Python 2.6 or later. Use Python 2.6 '
'or later or disable type checking in %s' % sig)
def _validate_arg(value, expected):
"Returns whether or not ``value`` is the ``expected`` type."
if type(value) == expected:
return True
return False
def _eval_arg_type(arg_type, T=Any, arg=None, sig=None):
"""
    Returns a type from a snippet of Python source. Should normally be
something just like 'str' or 'Object'.
arg_type the source to be evaluated
T the default type
arg context of where this type was extracted
sig context from where the arg was extracted
Returns a type or a Type
"""
try:
T = eval(arg_type)
except Exception, e:
raise ValueError('The type of %s could not be evaluated in %s for %s: %s' %
(arg_type, arg, sig, str(e)))
else:
if type(T) not in (type, Type):
raise TypeError('%s is not a valid type in %s for %s' %
(repr(T), arg, sig))
return T
def _parse_sig(sig, arg_names, validate=False):
"""
Parses signatures into a ``SortedDict`` of paramName => type.
Numerically-indexed arguments that do not correspond to an argument
    name in Python (i.e. the function takes a variable number of arguments)
    will each be keyed as the stringified version of its index.
sig the signature to be parsed
arg_names a list of argument names extracted from python source
Returns a tuple of (method name, types dict, return type)
"""
d = SIG_RE.match(sig)
if not d:
raise ValueError('Invalid method signature %s' % sig)
d = d.groupdict()
ret = [(n, Any) for n in arg_names]
if 'args_sig' in d and type(d['args_sig']) is str and d['args_sig'].strip():
for i, arg in enumerate(d['args_sig'].strip().split(',')):
_type_checking_available(sig, validate)
if '=' in arg:
if not type(ret) is SortedDict:
ret = SortedDict(ret)
dk = KWARG_RE.match(arg)
if not dk:
raise ValueError('Could not parse arg type %s in %s' % (arg, sig))
dk = dk.groupdict()
if not sum([(k in dk and type(dk[k]) is str and bool(dk[k].strip()))
for k in ('arg_name', 'arg_type')]):
raise ValueError('Invalid kwarg value %s in %s' % (arg, sig))
ret[dk['arg_name']] = _eval_arg_type(dk['arg_type'], None, arg, sig)
else:
if type(ret) is SortedDict:
raise ValueError('Positional arguments must occur '
'before keyword arguments in %s' % sig)
if len(ret) < i + 1:
ret.append((str(i), _eval_arg_type(arg, None, arg, sig)))
else:
ret[i] = (ret[i][0], _eval_arg_type(arg, None, arg, sig))
if not type(ret) is SortedDict:
ret = SortedDict(ret)
return (d['method_name'],
ret,
(_eval_arg_type(d['return_sig'], Any, 'return', sig)
if d['return_sig'] else Any))
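# Illustrative sketch (not part of the original module): what _parse_sig
# returns for a typed signature. `String` is assumed to be provided by the
# wildcard import of codenode.external.jsonrpc.types above; the method name
# and argument name below are hypothetical.
def _example_parse_sig():
    # Parse a one-argument signature with a declared return type; the result
    # is (method name, SortedDict of arg name -> type, return type).
    method, arg_types, return_type = _parse_sig(
        'myapp.echo(String) -> String', ['message'])
    assert method == 'myapp.echo'
    assert arg_types['message'] is String
    assert return_type is String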
def _inject_args(sig, types):
"""
A function to inject arguments manually into a method signature before
it's been parsed. If using keyword arguments use 'kw=type' instead in
the types array.
sig the string signature
types a list of types to be inserted
Returns the altered signature.
"""
if '(' in sig:
parts = sig.split('(')
sig = '%s(%s%s%s' % (
parts[0], ', '.join(types),
(', ' if parts[1].index(')') > 0 else ''), parts[1]
)
else:
sig = '%s(%s)' % (sig, ', '.join(types))
return sig
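# Illustrative sketch (not part of the original module): _inject_args prepends
# extra type names to a signature's argument list, adding parentheses when the
# signature has none. The signatures below are hypothetical.
def _example_inject_args():
    assert (_inject_args('ns.login(String) -> Boolean', ('String', 'String'))
            == 'ns.login(String, String, String) -> Boolean')
    assert _inject_args('ns.ping', ('String',)) == 'ns.ping(String)'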
def jsonrpc_method(name, authenticated=False, safe=False, validate=False,
site=default_site):
"""
    Wraps a function and turns it into a JSON-RPC method. Adds several
    attributes to the function specific to the JSON-RPC machinery and adds
    it to the default jsonrpc_site if no site is provided. You must import
    the module containing these functions in your urls.py.
    name
      The name of your method, e.g. `namespace.methodName`. The method name
can include type information, like `ns.method(String, Array) -> Nil`.
authenticated=False
Adds `username` and `password` arguments to the beginning of your
method if the user hasn't already been authenticated. These will
      be used to authenticate the user against `django.contrib.auth.authenticate`.
If you use HTTP auth or other authentication middleware, `username`
and `password` will not be added, and this method will only check
against `request.user.is_authenticated`.
      You may pass a callable to replace `django.contrib.auth.authenticate`
as the authentication method. It must return either a User or `None`
and take the keyword arguments `username` and `password`.
safe=False
Designates whether or not your method may be accessed by HTTP GET.
By default this is turned off.
validate=False
Validates the arguments passed to your method based on type
information provided in the signature. Supply type information by
including types in your method declaration. Like so:
@jsonrpc_method('myapp.specialSauce(Array, String)', validate=True)
def special_sauce(self, ingredients, instructions):
return SpecialSauce(ingredients, instructions)
Calls to `myapp.specialSauce` will now check each arguments type
before calling `special_sauce`, throwing an `InvalidParamsError`
when it encounters a discrepancy. This can significantly reduce the
amount of code required to write JSON-RPC services.
site=default_site
Defines which site the jsonrpc method will be added to. Can be any
object that provides a `register(name, func)` method.
"""
def decorator(func):
arg_names = getargspec(func)[0][1:]
X = {'name': name, 'arg_names': arg_names}
if authenticated:
if authenticated is True:
# TODO: this is an assumption
X['arg_names'] = ['username', 'password'] + X['arg_names']
X['name'] = _inject_args(X['name'], ('String', 'String'))
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
else:
authenticate = authenticated
@wraps(func)
def _func(request, *args, **kwargs):
user = getattr(request, 'user', None)
is_authenticated = getattr(user, 'is_authenticated', lambda: False)
if ((user is not None
and callable(is_authenticated) and not is_authenticated())
or user is None):
user = None
try:
creds = args[:2]
user = authenticate(username=creds[0], password=creds[1])
if user is not None:
args = args[2:]
except IndexError:
if 'username' in kwargs and 'password' in kwargs:
user = authenticate(username=kwargs['username'],
password=kwargs['password'])
if user is not None:
kwargs.pop('username')
kwargs.pop('password')
else:
raise InvalidParamsError(
'Authenticated methods require at least '
'[username, password] or {username: password:} arguments')
if user is None:
raise InvalidCredentialsError
request.user = user
return func(request, *args, **kwargs)
else:
_func = func
method, arg_types, return_type = \
_parse_sig(X['name'], X['arg_names'], validate)
_func.json_args = X['arg_names']
_func.json_arg_types = arg_types
_func.json_return_type = return_type
_func.json_method = method
_func.json_safe = safe
_func.json_sig = X['name']
_func.json_validate = validate
site.register(method, _func)
return _func
return decorator
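# Hedged usage sketch, mirroring the docstring above; `myapp.echo` and `echo`
# are hypothetical names, and `String` is assumed to come from the types
# import. Guarded so the example method is only registered when this file is
# run directly.
if __name__ == '__main__':
    @jsonrpc_method('myapp.echo(String) -> String', validate=True)
    def echo(request, message):
        # With validate=True the argument is type-checked against `String`
        # before this body runs; the method is registered on default_site.
        return message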
|
|
from itertools import chain
from nineml.user.component import Property, Component, Prototype, Definition
from nineml.exceptions import (
NineMLUsageError, NineMLNameError, name_error, NineMLUnitMismatchError)
from nineml.base import (
ContainerObject, DynamicPortsObject)
class Initial(Property):
"""
Represents the initial state of a state variable
"""
nineml_type = "Initial"
@classmethod
def _child_accessor_name(cls):
return 'initial_value'
class DynamicsProperties(Component, DynamicPortsObject):
"""
A DynamicsProperties may be regarded as a parameterized instance of a
nineml.abstraction.Dynamics.
Parameters
----------
name : str
a name for the component_class.
definition : Definition
the URL of an abstraction layer component_class class definition,
a Definition or a Prototype instance.
    properties : List[Property]|Dict[str,Quantity]
        a dictionary containing (value, units) pairs or a list of Property
        objects for the component_class's properties.
    initial_values : List[Property]|Dict[str,Quantity]
        a dictionary containing (value, units) pairs or a list of Initial
        objects for the component_class's state variables.
"""
nineml_type = 'DynamicsProperties'
nineml_children = Component.nineml_children + (Initial,)
def __init__(self, name, definition, properties={}, initial_values={},
initial_regime=None,
check_initial_values=False):
super(DynamicsProperties, self).__init__(
name=name, definition=definition, properties=properties)
if isinstance(initial_values, dict):
initial_values = (Initial(name, qty)
for name, qty in initial_values.items())
self.add(*initial_values)
if check_initial_values:
self.check_initial_values()
self.initial_regime = initial_regime
@property
def component_classes(self):
"""
Returns the component class wrapped in an iterator for duck typing
with Selection objects
"""
return iter([self.component_class])
def flatten(self, name=None):
return self.clone(name=name, clone_definitions=True)
def get_nineml_type(self):
return self.nineml_type
def check_initial_values(self):
for var in self.definition.component_class.state_variables:
try:
initial_value = self.initial_value(var.name)
except KeyError:
raise NineMLUsageError(
"Initial value not specified for {}".format(var.name))
initial_units = initial_value.units
initial_dimension = initial_units.dimension
var_dimension = var.dimension
if initial_dimension != var_dimension:
raise NineMLUsageError(
"Dimensions for '{}' initial value, {}, in '{}' don't "
"match that of its definition in '{}', {}."
.format(var.name, initial_dimension, self.name,
self.component_class.name, var_dimension))
def __getinitargs__(self):
return (self.name, self.definition, self._properties,
self._initial_values, self._url)
def __getitem__(self, name):
try:
return self.initial_value(name).quantity
except NineMLNameError:
            return super(DynamicsProperties, self).__getitem__(name)
def __setitem__(self, name, qty):
try:
self.initial_value(name).quantity = qty
except NineMLNameError:
super(DynamicsProperties, self).__setitem__(name, qty)
@property
def initial_values(self):
if isinstance(self.definition, Prototype):
comp = self.definition.component
return (
(self._initial_values[n]
if n in self._initial_values else comp.initial_value(n))
for n in set(chain(self._initial_values,
comp.initial_value_names)))
else:
return iter(self._initial_values.values())
@name_error
def initial_value(self, name):
try:
return self._initial_values[name]
except KeyError:
try:
return self.definition.component.initial_value(name)
except AttributeError:
raise NineMLNameError(
"No initial value named '{}' in component class"
.format(name))
@property
def initial_regime(self):
return self._initial_regime
@initial_regime.setter
def initial_regime(self, regime_name):
if regime_name is None:
            # If no regime is provided, pick the regime with the most time
            # derivatives. This is a bit of a hack until the state-layer is
            # implemented.
regime_name = max(self.component_class.regimes,
key=lambda x: x.num_time_derivatives).name
elif regime_name not in self.component_class.regime_names:
raise NineMLUsageError(
"Specified initial regime, '{}', is not a name of a regime in "
"'{}' Dynamics class (available '{}')"
.format(regime_name, self.component_class.name,
"', '".join(self.component_class.regime_names)))
self._initial_regime = regime_name
def set(self, prop):
try:
super(DynamicsProperties, self).set(prop)
except NineMLNameError:
try:
state_variable = self.component_class.state_variable(prop.name)
except NineMLNameError:
raise NineMLNameError(
"'{}' Dynamics does not have a Parameter or StateVariable "
"named '{}'".format(self.component_class.name, prop.name))
if prop.units.dimension != state_variable.dimension:
raise NineMLUnitMismatchError(
"Dimensions for '{}' initial value ('{}') don't match that"
" of state variable in component class ('{}')."
.format(prop.name, prop.units.dimension.name,
state_variable.dimension.name))
self._initial_values[prop.name] = prop
@property
def initial_value_names(self):
if isinstance(self.definition, Prototype):
return (p.name for p in self.initial_values)
else:
return iter(self._initial_values.keys())
@property
def num_initial_values(self):
return len(list(self.initial_values))
@property
def attributes_with_units(self):
return chain(
super(DynamicsProperties, self).attributes_with_units,
self.initial_values, *[
v.value.distribution.properties for v in self.initial_values
if v.value.is_random()])
def elements(self, local=False):
"""
        Overrides the elements method in the ContainerObject base class to
        allow the "local" kwarg, which restricts iteration to the members
        declared in this instance (i.e. not the prototype)
"""
if local:
return chain(iter(self._properties.values()),
iter(self._initial_values.values()))
else:
return ContainerObject.elements(self)
def serialize_node(self, node, **options):
super(DynamicsProperties, self).serialize_node(node, **options)
node.children(iter(self._initial_values.values()), **options)
@classmethod
def unserialize_node(cls, node, **options):
name = node.attr('name', **options)
definition = node.child((Definition, Prototype), **options)
properties = node.children(Property, **options)
initial_values = node.children(Initial, **options)
return cls(name, definition, properties=properties,
initial_values=initial_values)
def serialize_node_v1(self, node, **options):
self.serialize_node(node, **options)
@classmethod
def unserialize_node_v1(cls, node, **options):
return cls.unserialize_node(node, **options)
def analog_receive_port(self, name):
return self.component_class.analog_receive_port(name)
@property
def analog_receive_ports(self):
return self.component_class.analog_receive_ports
@property
def analog_receive_port_names(self):
return self.component_class.analog_receive_port_names
@property
def num_analog_receive_ports(self):
return self.component_class.num_analog_receive_ports
def analog_send_port(self, name):
return self.component_class.analog_send_port(name)
@property
def analog_send_ports(self):
return self.component_class.analog_send_ports
@property
def analog_send_port_names(self):
return self.component_class.analog_send_port_names
@property
def num_analog_send_ports(self):
return self.component_class.num_analog_send_ports
def analog_reduce_port(self, name):
return self.component_class.analog_reduce_port(name)
@property
def analog_reduce_ports(self):
return self.component_class.analog_reduce_ports
@property
def analog_reduce_port_names(self):
return self.component_class.analog_reduce_port_names
@property
def num_analog_reduce_ports(self):
return self.component_class.num_analog_reduce_ports
def event_receive_port(self, name):
return self.component_class.event_receive_port(name)
@property
def event_receive_ports(self):
return self.component_class.event_receive_ports
@property
def event_receive_port_names(self):
return self.component_class.event_receive_port_names
@property
def num_event_receive_ports(self):
return self.component_class.num_event_receive_ports
def event_send_port(self, name):
return self.component_class.event_send_port(name)
@property
def event_send_ports(self):
return self.component_class.event_send_ports
@property
def event_send_port_names(self):
return self.component_class.event_send_port_names
@property
def num_event_send_ports(self):
return self.component_class.num_event_send_ports
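# Hedged construction sketch (not part of this module): `definition`, `tau_qty`
# and `v0_qty` are hypothetical placeholders for a Definition/Prototype and two
# Quantity objects. Dict-valued initial_values are wrapped into Initial objects
# by __init__ above; properties are handled analogously by the Component base
# class.
def _example_dynamics_properties(definition, tau_qty, v0_qty):
    props = DynamicsProperties(
        name='example_cell',
        definition=definition,
        properties={'tau': tau_qty},
        initial_values={'v': v0_qty})
    # Look up the Initial named 'v' and list all initial value names.
    return props.initial_value('v'), list(props.initial_value_names)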
|
|
"""
Test the 'register' command.
"""
from __future__ import print_function
import os
import sys
import time
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class RegisterCommandsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
TestBase.setUp(self)
self.has_teardown = False
def tearDown(self):
self.dbg.GetSelectedTarget().GetProcess().Destroy()
TestBase.tearDown(self)
@skipIfiOSSimulator
@skipIf(archs=no_match(['amd64', 'arm', 'i386', 'x86_64']))
def test_register_commands(self):
"""Test commands related to registers, in particular vector registers."""
self.build()
self.common_setup()
# verify that logging does not assert
self.log_enable("registers")
self.expect("register read -a", MISSING_EXPECTED_REGISTERS,
substrs=['registers were unavailable'], matching=False)
if self.getArchitecture() in ['amd64', 'i386', 'x86_64']:
self.runCmd("register read xmm0")
self.runCmd("register read ymm15") # may be available
self.runCmd("register read bnd0") # may be available
elif self.getArchitecture() in ['arm', 'armv7', 'armv7k', 'arm64']:
self.runCmd("register read s0")
self.runCmd("register read q15") # may be available
self.expect(
"register read -s 4",
substrs=['invalid register set index: 4'],
error=True)
@skipIfiOSSimulator
# Writing of mxcsr register fails, presumably due to a kernel/hardware
# problem
@skipIfTargetAndroid(archs=["i386"])
@skipIf(archs=no_match(['amd64', 'arm', 'i386', 'x86_64']))
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr37995")
def test_fp_register_write(self):
"""Test commands that write to registers, in particular floating-point registers."""
self.build()
self.fp_register_write()
@skipIfiOSSimulator
# "register read fstat" always return 0xffff
@expectedFailureAndroid(archs=["i386"])
@skipIfFreeBSD # llvm.org/pr25057
@skipIf(archs=no_match(['amd64', 'i386', 'x86_64']))
@skipIfOutOfTreeDebugserver
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr37995")
def test_fp_special_purpose_register_read(self):
"""Test commands that read fpu special purpose registers."""
self.build()
self.fp_special_purpose_register_read()
@skipIfiOSSimulator
@skipIf(archs=no_match(['amd64', 'arm', 'i386', 'x86_64']))
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr37683")
def test_register_expressions(self):
"""Test expression evaluation with commands related to registers."""
self.build()
self.common_setup()
if self.getArchitecture() in ['amd64', 'i386', 'x86_64']:
gpr = "eax"
vector = "xmm0"
elif self.getArchitecture() in ['arm64', 'aarch64']:
gpr = "w0"
vector = "v0"
elif self.getArchitecture() in ['arm', 'armv7', 'armv7k']:
gpr = "r0"
vector = "q0"
self.expect("expr/x $%s" % gpr, substrs=['unsigned int', ' = 0x'])
self.expect("expr $%s" % vector, substrs=['vector_type'])
self.expect(
"expr (unsigned int)$%s[0]" %
vector, substrs=['unsigned int'])
if self.getArchitecture() in ['amd64', 'x86_64']:
self.expect(
"expr -- ($rax & 0xffffffff) == $eax",
substrs=['true'])
@skipIfiOSSimulator
@skipIf(archs=no_match(['amd64', 'x86_64']))
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr37683")
def test_convenience_registers(self):
"""Test convenience registers."""
self.build()
self.convenience_registers()
@skipIfiOSSimulator
@skipIf(archs=no_match(['amd64', 'x86_64']))
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr37683")
def test_convenience_registers_with_process_attach(self):
"""Test convenience registers after a 'process attach'."""
self.build()
self.convenience_registers_with_process_attach(test_16bit_regs=False)
@skipIfiOSSimulator
@skipIf(archs=no_match(['amd64', 'x86_64']))
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr37683")
def test_convenience_registers_16bit_with_process_attach(self):
"""Test convenience registers after a 'process attach'."""
self.build()
self.convenience_registers_with_process_attach(test_16bit_regs=True)
def common_setup(self):
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break in main().
lldbutil.run_break_set_by_symbol(
self, "main", num_expected_locations=-1)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped', 'stop reason = breakpoint'])
    # Platform-specific logging of the specified category
def log_enable(self, category):
# This intentionally checks the host platform rather than the target
# platform as logging is host side.
self.platform = ""
if (sys.platform.startswith("freebsd") or
sys.platform.startswith("linux") or
sys.platform.startswith("netbsd")):
self.platform = "posix"
if self.platform != "":
self.log_file = self.getBuildArtifact('TestRegisters.log')
self.runCmd(
"log enable " +
self.platform +
" " +
str(category) +
" registers -v -f " +
self.log_file,
RUN_SUCCEEDED)
if not self.has_teardown:
def remove_log(self):
if os.path.exists(self.log_file):
os.remove(self.log_file)
self.has_teardown = True
self.addTearDownHook(remove_log)
def write_and_read(self, frame, register, new_value, must_exist=True):
value = frame.FindValue(register, lldb.eValueTypeRegister)
if must_exist:
self.assertTrue(
value.IsValid(),
"finding a value for register " +
register)
elif not value.IsValid():
return # If register doesn't exist, skip this test
self.runCmd("register write " + register + " \'" + new_value + "\'")
self.expect(
"register read " +
register,
substrs=[
register +
' = ',
new_value])
def fp_special_purpose_register_read(self):
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process and stop.
self.expect("run", PROCESS_STOPPED, substrs=['stopped'])
# Check stop reason; Should be either signal SIGTRAP or EXC_BREAKPOINT
output = self.res.GetOutput()
matched = False
substrs = [
'stop reason = EXC_BREAKPOINT',
'stop reason = signal SIGTRAP']
for str1 in substrs:
matched = output.find(str1) != -1
with recording(self, False) as sbuf:
print("%s sub string: %s" % ('Expecting', str1), file=sbuf)
print("Matched" if matched else "Not Matched", file=sbuf)
if matched:
break
self.assertTrue(matched, STOPPED_DUE_TO_SIGNAL)
process = target.GetProcess()
self.assertTrue(process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
thread = process.GetThreadAtIndex(0)
self.assertTrue(thread.IsValid(), "current thread is valid")
currentFrame = thread.GetFrameAtIndex(0)
self.assertTrue(currentFrame.IsValid(), "current frame is valid")
# Extract the value of fstat and ftag flag at the point just before
# we start pushing floating point values on st% register stack
value = currentFrame.FindValue("fstat", lldb.eValueTypeRegister)
error = lldb.SBError()
reg_value_fstat_initial = value.GetValueAsUnsigned(error, 0)
self.assertTrue(error.Success(), "reading a value for fstat")
value = currentFrame.FindValue("ftag", lldb.eValueTypeRegister)
error = lldb.SBError()
reg_value_ftag_initial = value.GetValueAsUnsigned(error, 0)
self.assertTrue(error.Success(), "reading a value for ftag")
fstat_top_pointer_initial = (reg_value_fstat_initial & 0x3800) >> 11
# Execute 'si' aka 'thread step-inst' instruction 5 times and with
# every execution verify the value of fstat and ftag registers
for x in range(0, 5):
# step into the next instruction to push a value on 'st' register
# stack
self.runCmd("si", RUN_SUCCEEDED)
# Verify fstat and save it to be used for verification in next
# execution of 'si' command
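            # Each floating point push decrements TOP by one (0x0800 in the
            # status word); when TOP is already 0 it wraps around to 7.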
if not (reg_value_fstat_initial & 0x3800):
self.expect("register read fstat", substrs=[
'fstat' + ' = ', str("0x%0.4x" % ((reg_value_fstat_initial & ~(0x3800)) | 0x3800))])
reg_value_fstat_initial = (
(reg_value_fstat_initial & ~(0x3800)) | 0x3800)
fstat_top_pointer_initial = 7
else:
self.expect("register read fstat", substrs=[
'fstat' + ' = ', str("0x%0.4x" % (reg_value_fstat_initial - 0x0800))])
reg_value_fstat_initial = (reg_value_fstat_initial - 0x0800)
fstat_top_pointer_initial -= 1
# Verify ftag and save it to be used for verification in next
# execution of 'si' command
self.expect(
"register read ftag", substrs=[
'ftag' + ' = ', str(
"0x%0.4x" %
(reg_value_ftag_initial | (
1 << fstat_top_pointer_initial)))])
reg_value_ftag_initial = reg_value_ftag_initial | (
1 << fstat_top_pointer_initial)
def fp_register_write(self):
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process, stop at the entry point.
error = lldb.SBError()
process = target.Launch(
lldb.SBListener(),
None, None, # argv, envp
None, None, None, # stdin/out/err
self.get_process_working_directory(),
0, # launch flags
True, # stop at entry
error)
self.assertTrue(error.Success(), "Launch succeeds. Error is :" + str(error))
self.assertTrue(
process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
thread = process.GetThreadAtIndex(0)
self.assertTrue(thread.IsValid(), "current thread is valid")
currentFrame = thread.GetFrameAtIndex(0)
self.assertTrue(currentFrame.IsValid(), "current frame is valid")
if self.getArchitecture() in ['amd64', 'i386', 'x86_64']:
reg_list = [
# reg value must-have
("fcw", "0x0000ff0e", False),
("fsw", "0x0000ff0e", False),
("ftw", "0x0000ff0e", False),
("ip", "0x0000ff0e", False),
("dp", "0x0000ff0e", False),
("mxcsr", "0x0000ff0e", False),
("mxcsrmask", "0x0000ff0e", False),
]
st0regname = None
if currentFrame.FindRegister("st0").IsValid():
st0regname = "st0"
elif currentFrame.FindRegister("stmm0").IsValid():
st0regname = "stmm0"
if st0regname is not None:
# reg value
# must-have
reg_list.append(
(st0regname, "{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x00 0x00}", True))
reg_list.append(
("xmm0",
"{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x2f 0x2f}",
True))
reg_list.append(
("xmm15",
"{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x0e 0x0f}",
False))
elif self.getArchitecture() in ['arm64', 'aarch64']:
reg_list = [
# reg value
# must-have
("fpsr", "0xfbf79f9f", True),
("s0", "1.25", True),
("s31", "0.75", True),
("d1", "123", True),
("d17", "987", False),
("v1", "{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x2f 0x2f}", True),
("v14",
"{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x0e 0x0f}",
False),
]
elif self.getArchitecture() in ['armv7'] and self.platformIsDarwin():
reg_list = [
# reg value
# must-have
("fpsr", "0xfbf79f9f", True),
("s0", "1.25", True),
("s31", "0.75", True),
("d1", "123", True),
("d17", "987", False),
("q1", "{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x2f 0x2f}", True),
("q14",
"{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x0e 0x0f}",
False),
]
elif self.getArchitecture() in ['arm', 'armv7k']:
reg_list = [
# reg value
# must-have
("fpscr", "0xfbf79f9f", True),
("s0", "1.25", True),
("s31", "0.75", True),
("d1", "123", True),
("d17", "987", False),
("q1", "{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x2f 0x2f}", True),
("q14",
"{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x0e 0x0f}",
False),
]
for (reg, val, must) in reg_list:
self.write_and_read(currentFrame, reg, val, must)
if self.getArchitecture() in ['amd64', 'i386', 'x86_64']:
if st0regname is None:
self.fail("st0regname could not be determined")
self.runCmd(
"register write " +
st0regname +
" \"{0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00}\"")
self.expect(
"register read " +
st0regname +
" --format f",
substrs=[
st0regname +
' = 0'])
has_avx = False
has_mpx = False
# Returns an SBValueList.
registerSets = currentFrame.GetRegisters()
for registerSet in registerSets:
if 'advanced vector extensions' in registerSet.GetName().lower():
has_avx = True
if 'memory protection extension' in registerSet.GetName().lower():
has_mpx = True
if has_avx:
new_value = "{0x01 0x02 0x03 0x00 0x00 0x00 0x00 0x00 0x09 0x0a 0x2f 0x2f 0x2f 0x2f 0x0e 0x0f 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x0c 0x0d 0x0e 0x0f}"
self.write_and_read(currentFrame, "ymm0", new_value)
self.write_and_read(currentFrame, "ymm7", new_value)
self.expect("expr $ymm0", substrs=['vector_type'])
else:
self.runCmd("register read ymm0")
if has_mpx:
# Test write and read for bnd0.
new_value_w = "{0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f 0x10}"
self.runCmd("register write bnd0 \'" + new_value_w + "\'")
new_value_r = "{0x0807060504030201 0x100f0e0d0c0b0a09}"
self.expect("register read bnd0", substrs = ['bnd0 = ', new_value_r])
self.expect("expr $bnd0", substrs = ['vector_type'])
# Test write and for bndstatus.
new_value = "{0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08}"
self.write_and_read(currentFrame, "bndstatus", new_value)
self.expect("expr $bndstatus", substrs = ['vector_type'])
else:
self.runCmd("register read bnd0")
def convenience_registers(self):
"""Test convenience registers."""
self.common_setup()
# The command "register read -a" does output a derived register like
# eax...
self.expect("register read -a", matching=True,
substrs=['eax'])
# ...however, the vanilla "register read" command should not output derived registers like eax.
self.expect("register read", matching=False,
substrs=['eax'])
# Test reading of rax and eax.
self.expect("register read rax eax",
substrs=['rax = 0x', 'eax = 0x'])
# Now write rax with a unique bit pattern and test that eax indeed
# represents the lower half of rax.
self.runCmd("register write rax 0x1234567887654321")
self.expect("register read rax 0x1234567887654321",
substrs=['0x1234567887654321'])
def convenience_registers_with_process_attach(self, test_16bit_regs):
"""Test convenience registers after a 'process attach'."""
exe = self.getBuildArtifact("a.out")
# Spawn a new process
pid = self.spawnSubprocess(exe, ['wait_for_attach']).pid
self.addTearDownHook(self.cleanupSubprocesses)
if self.TraceOn():
print("pid of spawned process: %d" % pid)
self.runCmd("process attach -p %d" % pid)
# Check that "register read eax" works.
self.runCmd("register read eax")
if self.getArchitecture() in ['amd64', 'x86_64']:
self.expect("expr -- ($rax & 0xffffffff) == $eax",
substrs=['true'])
if test_16bit_regs:
self.expect("expr -- $ax == (($ah << 8) | $al)",
substrs=['true'])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import httpretty
import six
from keystoneclient import exceptions
from keystoneclient.openstack.common import jsonutils
from keystoneclient.openstack.common import timeutils
from keystoneclient.tests.v2_0 import utils
from keystoneclient.v2_0 import client
class AuthenticateAgainstKeystoneTests(utils.TestCase):
def setUp(self):
super(AuthenticateAgainstKeystoneTests, self).setUp()
self.TEST_RESPONSE_DICT = {
"access": {
"token": {
"expires": "2020-01-01T00:00:10.000123Z",
"id": self.TEST_TOKEN,
"tenant": {
"id": self.TEST_TENANT_ID
},
},
"user": {
"id": self.TEST_USER
},
"serviceCatalog": self.TEST_SERVICE_CATALOG,
},
}
self.TEST_REQUEST_BODY = {
"auth": {
"passwordCredentials": {
"username": self.TEST_USER,
"password": self.TEST_TOKEN,
},
"tenantId": self.TEST_TENANT_ID,
},
}
@httpretty.activate
def test_authenticate_success_expired(self):
# Build an expired token
self.TEST_RESPONSE_DICT['access']['token']['expires'] = (
(timeutils.utcnow() - datetime.timedelta(1)).isoformat())
exp_resp = httpretty.Response(body=json.dumps(self.TEST_RESPONSE_DICT),
content_type='application/json')
# Build a new response
TEST_TOKEN = "abcdef"
self.TEST_RESPONSE_DICT['access']['token']['expires'] = (
'2020-01-01T00:00:10.000123Z')
self.TEST_RESPONSE_DICT['access']['token']['id'] = TEST_TOKEN
new_resp = httpretty.Response(body=json.dumps(self.TEST_RESPONSE_DICT),
content_type='application/json')
# return expired first, and then the new response
self.stub_auth(responses=[exp_resp, new_resp])
cs = client.Client(tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL,
username=self.TEST_USER,
password=self.TEST_TOKEN)
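        # The client should re-authenticate past the expired token and end up
        # holding the fresh token from the second response.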
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token, TEST_TOKEN)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_authenticate_failure(self):
_auth = 'auth'
_cred = 'passwordCredentials'
_pass = 'password'
self.TEST_REQUEST_BODY[_auth][_cred][_pass] = 'bad_key'
error = {"unauthorized": {"message": "Unauthorized",
"code": "401"}}
self.stub_auth(status=401, json=error)
# Workaround for issue with assertRaises on python2.6
# where with assertRaises(exceptions.Unauthorized): doesn't work
# right
def client_create_wrapper():
client.Client(username=self.TEST_USER,
password="bad_key",
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertRaises(exceptions.Unauthorized, client_create_wrapper)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_auth_redirect(self):
self.stub_auth(status=305, body='Use Proxy',
location=self.TEST_ADMIN_URL + "/tokens")
self.stub_auth(base_url=self.TEST_ADMIN_URL,
json=self.TEST_RESPONSE_DICT)
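        # The 305 response redirects authentication to the admin URL, where the
        # stubbed catalog response is served.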
cs = client.Client(username=self.TEST_USER,
password=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_authenticate_success_password_scoped(self):
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(username=self.TEST_USER,
password=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_authenticate_success_password_unscoped(self):
del self.TEST_RESPONSE_DICT['access']['serviceCatalog']
del self.TEST_REQUEST_BODY['auth']['tenantId']
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(username=self.TEST_USER,
password=self.TEST_TOKEN,
auth_url=self.TEST_URL)
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertFalse('serviceCatalog' in cs.service_catalog.catalog)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_auth_url_token_authentication(self):
fake_token = 'fake_token'
fake_url = '/fake-url'
fake_resp = {'result': True}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
self.stub_url('GET', [fake_url], json=fake_resp,
base_url=self.TEST_ADMIN_IDENTITY_ENDPOINT)
cl = client.Client(auth_url=self.TEST_URL,
token=fake_token)
body = httpretty.last_request().body
if six.PY3:
body = body.decode('utf-8')
body = jsonutils.loads(body)
self.assertEqual(body['auth']['token']['id'], fake_token)
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
self.assertEqual(httpretty.last_request().headers.get('X-Auth-Token'),
self.TEST_TOKEN)
@httpretty.activate
def test_authenticate_success_token_scoped(self):
del self.TEST_REQUEST_BODY['auth']['passwordCredentials']
self.TEST_REQUEST_BODY['auth']['token'] = {'id': self.TEST_TOKEN}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(token=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_authenticate_success_token_scoped_trust(self):
del self.TEST_REQUEST_BODY['auth']['passwordCredentials']
self.TEST_REQUEST_BODY['auth']['token'] = {'id': self.TEST_TOKEN}
self.TEST_REQUEST_BODY['auth']['trust_id'] = self.TEST_TRUST_ID
response = self.TEST_RESPONSE_DICT.copy()
response['access']['trust'] = {"trustee_user_id": self.TEST_USER,
"id": self.TEST_TRUST_ID}
self.stub_auth(json=response)
cs = client.Client(token=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
trust_id=self.TEST_TRUST_ID,
auth_url=self.TEST_URL)
self.assertTrue(cs.auth_ref.trust_scoped)
self.assertEqual(cs.auth_ref.trust_id, self.TEST_TRUST_ID)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_authenticate_success_token_unscoped(self):
del self.TEST_REQUEST_BODY['auth']['passwordCredentials']
del self.TEST_REQUEST_BODY['auth']['tenantId']
del self.TEST_RESPONSE_DICT['access']['serviceCatalog']
self.TEST_REQUEST_BODY['auth']['token'] = {'id': self.TEST_TOKEN}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(token=self.TEST_TOKEN,
auth_url=self.TEST_URL)
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertFalse('serviceCatalog' in cs.service_catalog.catalog)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
@httpretty.activate
def test_allow_override_of_auth_token(self):
fake_url = '/fake-url'
fake_token = 'fake_token'
fake_resp = {'result': True}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
self.stub_url('GET', [fake_url], json=fake_resp,
base_url=self.TEST_ADMIN_IDENTITY_ENDPOINT)
cl = client.Client(username='exampleuser',
password='password',
tenant_name='exampleproject',
auth_url=self.TEST_URL)
self.assertEqual(cl.auth_token, self.TEST_TOKEN)
# the token returned from the authentication will be used
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
self.assertEqual(httpretty.last_request().headers.get('X-Auth-Token'),
self.TEST_TOKEN)
# then override that token and the new token shall be used
cl.auth_token = fake_token
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
self.assertEqual(httpretty.last_request().headers.get('X-Auth-Token'),
fake_token)
# if we clear that overridden token then we fall back to the original
del cl.auth_token
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
self.assertEqual(httpretty.last_request().headers.get('X-Auth-Token'),
self.TEST_TOKEN)
|
|
"""The tests for the MQTT cover platform."""
import unittest
from homeassistant.setup import setup_component
from homeassistant.const import STATE_OPEN, STATE_CLOSED, STATE_UNKNOWN
import homeassistant.components.cover as cover
from homeassistant.components.cover.mqtt import MqttCover
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
class TestCoverMQTT(unittest.TestCase):
"""Test the MQTT cover."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_publish = mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_state_via_state_topic(self):
"""Test the controlling state via topic."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP'
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '0')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
fire_mqtt_message(self.hass, 'state-topic', '50')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '100')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
fire_mqtt_message(self.hass, 'state-topic', STATE_CLOSED)
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
fire_mqtt_message(self.hass, 'state-topic', STATE_OPEN)
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
def test_state_via_template(self):
"""Test the controlling state via topic."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'value_template': '{{ (value | multiply(0.01)) | int }}',
}
}))
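        # The template multiplies the raw payload by 0.01 and truncates to an
        # int, so '10000' becomes 100 (open) and '99' becomes 0 (closed).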
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '10000')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
fire_mqtt_message(self.hass, 'state-topic', '99')
self.hass.block_till_done()
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
def test_optimistic_state_change(self):
"""Test changing state optimistically."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'command-topic',
'qos': 0,
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.open_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'OPEN', 0, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_OPEN, state.state)
cover.close_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'CLOSE', 0, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_CLOSED, state.state)
def test_send_open_cover_command(self):
"""Test the sending of open_cover."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 2
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.open_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'OPEN', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
def test_send_close_cover_command(self):
"""Test the sending of close_cover."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 2
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.close_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'CLOSE', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
    def test_send_stop_cover_command(self):
"""Test the sending of stop_cover."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 2
}
}))
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
cover.stop_cover(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'STOP', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('cover.test')
self.assertEqual(STATE_UNKNOWN, state.state)
def test_current_cover_position(self):
"""Test the current cover position."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP'
}
}))
state_attributes_dict = self.hass.states.get(
'cover.test').attributes
self.assertFalse('current_position' in state_attributes_dict)
self.assertFalse('current_tilt_position' in state_attributes_dict)
fire_mqtt_message(self.hass, 'state-topic', '0')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(0, current_cover_position)
fire_mqtt_message(self.hass, 'state-topic', '50')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(50, current_cover_position)
fire_mqtt_message(self.hass, 'state-topic', '101')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(50, current_cover_position)
fire_mqtt_message(self.hass, 'state-topic', 'non-numeric')
self.hass.block_till_done()
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_position']
self.assertEqual(50, current_cover_position)
def test_tilt_defaults(self):
"""Test the defaults."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP',
'tilt_command_topic': 'tilt-command',
'tilt_status_topic': 'tilt-status'
}
}))
state_attributes_dict = self.hass.states.get(
'cover.test').attributes
self.assertTrue('current_tilt_position' in state_attributes_dict)
current_cover_position = self.hass.states.get(
'cover.test').attributes['current_tilt_position']
self.assertEqual(STATE_UNKNOWN, current_cover_position)
def test_tilt_via_invocation_defaults(self):
"""Test tilt defaults on close/open."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP',
'tilt_command_topic': 'tilt-command-topic',
'tilt_status_topic': 'tilt-status-topic'
}
}))
cover.open_cover_tilt(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('tilt-command-topic', 100, 0, False),
self.mock_publish.mock_calls[-2][1])
cover.close_cover_tilt(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('tilt-command-topic', 0, 0, False),
self.mock_publish.mock_calls[-2][1])
def test_tilt_given_value(self):
"""Test tilting to a given value."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP',
'tilt_command_topic': 'tilt-command-topic',
'tilt_status_topic': 'tilt-status-topic',
'tilt_opened_value': 400,
'tilt_closed_value': 125
}
}))
cover.open_cover_tilt(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('tilt-command-topic', 400, 0, False),
self.mock_publish.mock_calls[-2][1])
cover.close_cover_tilt(self.hass, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('tilt-command-topic', 125, 0, False),
self.mock_publish.mock_calls[-2][1])
def test_tilt_via_topic(self):
"""Test tilt by updating status via MQTT."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP',
'tilt_command_topic': 'tilt-command-topic',
'tilt_status_topic': 'tilt-status-topic',
'tilt_opened_value': 400,
'tilt_closed_value': 125
}
}))
fire_mqtt_message(self.hass, 'tilt-status-topic', '0')
self.hass.block_till_done()
current_cover_tilt_position = self.hass.states.get(
'cover.test').attributes['current_tilt_position']
self.assertEqual(0, current_cover_tilt_position)
fire_mqtt_message(self.hass, 'tilt-status-topic', '50')
self.hass.block_till_done()
current_cover_tilt_position = self.hass.states.get(
'cover.test').attributes['current_tilt_position']
self.assertEqual(50, current_cover_tilt_position)
def test_tilt_via_topic_altered_range(self):
"""Test tilt status via MQTT with altered tilt range."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP',
'tilt_command_topic': 'tilt-command-topic',
'tilt_status_topic': 'tilt-status-topic',
'tilt_opened_value': 400,
'tilt_closed_value': 125,
'tilt_min': 0,
'tilt_max': 50
}
}))
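        # With tilt_min=0 and tilt_max=50, raw tilt payloads are rescaled to
        # percentages: 50 becomes 100 and 25 becomes 50.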
fire_mqtt_message(self.hass, 'tilt-status-topic', '0')
self.hass.block_till_done()
current_cover_tilt_position = self.hass.states.get(
'cover.test').attributes['current_tilt_position']
self.assertEqual(0, current_cover_tilt_position)
fire_mqtt_message(self.hass, 'tilt-status-topic', '50')
self.hass.block_till_done()
current_cover_tilt_position = self.hass.states.get(
'cover.test').attributes['current_tilt_position']
self.assertEqual(100, current_cover_tilt_position)
fire_mqtt_message(self.hass, 'tilt-status-topic', '25')
self.hass.block_till_done()
current_cover_tilt_position = self.hass.states.get(
'cover.test').attributes['current_tilt_position']
self.assertEqual(50, current_cover_tilt_position)
def test_tilt_position(self):
"""Test tilt via method invocation."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP',
'tilt_command_topic': 'tilt-command-topic',
'tilt_status_topic': 'tilt-status-topic',
'tilt_opened_value': 400,
'tilt_closed_value': 125
}
}))
cover.set_cover_tilt_position(self.hass, 50, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('tilt-command-topic', 50, 0, False),
self.mock_publish.mock_calls[-2][1])
def test_tilt_position_altered_range(self):
"""Test tilt via method invocation with altered range."""
self.assertTrue(setup_component(self.hass, cover.DOMAIN, {
cover.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'qos': 0,
'payload_open': 'OPEN',
'payload_close': 'CLOSE',
'payload_stop': 'STOP',
'tilt_command_topic': 'tilt-command-topic',
'tilt_status_topic': 'tilt-status-topic',
'tilt_opened_value': 400,
'tilt_closed_value': 125,
'tilt_min': 0,
'tilt_max': 50
}
}))
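        # Requesting 50 percent with a 0-50 tilt range should publish the raw
        # value 25.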
cover.set_cover_tilt_position(self.hass, 50, 'cover.test')
self.hass.block_till_done()
self.assertEqual(('tilt-command-topic', 25, 0, False),
self.mock_publish.mock_calls[-2][1])
def test_find_percentage_in_range_defaults(self):
"""Test find percentage in range with default range."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
100, 0, 0, 100, False, False)
self.assertEqual(44, mqtt_cover.find_percentage_in_range(44))
def test_find_percentage_in_range_altered(self):
"""Test find percentage in range with altered range."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
180, 80, 80, 180, False, False)
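        # With a tilt range of 80-180, a raw value of 120 corresponds to
        # (120 - 80) / (180 - 80) * 100 = 40 percent.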
self.assertEqual(40, mqtt_cover.find_percentage_in_range(120))
def test_find_percentage_in_range_defaults_inverted(self):
"""Test find percentage in range with default range but inverted."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
100, 0, 0, 100, False, True)
self.assertEqual(56, mqtt_cover.find_percentage_in_range(44))
def test_find_percentage_in_range_altered_inverted(self):
"""Test find percentage in range with altered range and inverted."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
180, 80, 80, 180, False, True)
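        # Inverted range: the same raw value of 120 maps to 100 - 40 = 60 percent.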
self.assertEqual(60, mqtt_cover.find_percentage_in_range(120))
def test_find_in_range_defaults(self):
"""Test find in range with default range."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
100, 0, 0, 100, False, False)
self.assertEqual(44, mqtt_cover.find_in_range_from_percent(44))
def test_find_in_range_altered(self):
"""Test find in range with altered range."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
180, 80, 80, 180, False, False)
self.assertEqual(120, mqtt_cover.find_in_range_from_percent(40))
def test_find_in_range_defaults_inverted(self):
"""Test find in range with default range but inverted."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
100, 0, 0, 100, False, True)
self.assertEqual(44, mqtt_cover.find_in_range_from_percent(56))
def test_find_in_range_altered_inverted(self):
"""Test find in range with altered range and inverted."""
mqtt_cover = MqttCover(
'cover.test', 'foo', 'bar', 'fooBar', "fooBarBaz", 0, False,
'OPEN', 'CLOSE', 'OPEN', 'CLOSE', 'STOP', False, None,
180, 80, 80, 180, False, True)
self.assertEqual(120, mqtt_cover.find_in_range_from_percent(60))
|
|
# -*- coding: utf-8 -*-
import os
import warnings
import django
from django.utils import six
from django.db.models import fields as django_fields
from django.core.management.base import BaseCommand, CommandError
from django.db import models
from django.apps import apps
from collections import OrderedDict
class C(object):
def __init__(self, v, start=None, end=None):
self.v = v
if start is None:
if "\n" in self.v:
self.start = '"""'
else:
self.start = '"'
else:
self.start = start
if end is None:
if "\n" in self.v:
self.end = '"""'
else:
self.end = '"'
else:
self.end = end
def __str__(self):
return '{}{}{}'.format(self.start, self.v, self.end)
class CallCallable(object):
def __init__(self, name, *args, **kwargs):
self.name = name
self.args = list(args) # type: list
self.kwargs = kwargs # type: dict
def __str__(self):
assert isinstance(self.name, str)
paras = []
for arg in self.args:
paras.append(str(arg))
for k, v in self.kwargs.items():
paras.append("{}={}".format(k, str(v)))
return "{}({})".format(self.name, ", ".join(paras))
class BindName(object):
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
if isinstance(self.name, str):
return "{} = {}".format(self.name, str(self.value))
else:
return "{} = {}".format(", ".join(self.name), str(self.value))
class TransferField(object):
def __init__(self, field):
assert isinstance(field, models.Field)
self.field = field
self.attrname, self.col_name = field.get_attname_column()
self.bn = lambda v: BindName(self.field.get_attname(), v)
def trans_col_name(self, field):
attname, col_name = field.get_attname_column()
if col_name is not None and attname != col_name:
return col_name
def trans_related(self, field):
if isinstance(field, models.OneToOneField):
print(field.rel.to._meta.object_name, field.rel.related_name, field.rel.related_query_name)
warnings.warn(field)
elif isinstance(field, models.ForeignKey):
# print(field.rel.to._meta.object_name, field.rel.related_name, field.rel.related_query_name)
rel_field_name = self.attrname[:-3] if self.attrname.endswith("_id") else self.attrname
rel_field_id = "{}_id".format(rel_field_name)
rel_field_attr_name, rel_field_db_name = field.rel.to._meta.pk.get_attname_column()
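            # Emit two attributes for the foreign key: a "<name>_id" Column with
            # a ForeignKey to the related table's primary key, and a
            # relationship() attribute for object-level access.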
yield BindName(rel_field_id, (CallCallable(
"Column",
"Interger",
CallCallable(
"ForeignKey",
C("{}.{}".format(field.rel.to._meta.db_table, rel_field_db_name))
)
)))
yield BindName(rel_field_name, CallCallable(
"relationship",
'"{}"'.format(field.rel.to._meta.object_name),
back_populates=C("{}_set".format(rel_field_name))
))
elif isinstance(field, models.ManyToManyField):
warnings.warn(field)
def trans_char(self, field):
kwargs = self.trans_args(field)
if field.max_length is not None:
sa_type = CallCallable("String", field.max_length)
else:
sa_type = "String"
return CallCallable("Column", sa_type, **kwargs)
def trans_int(self, field):
return CallCallable("Column", "Integer", **self.trans_args(field))
def trans_args(self, field):
kwargs = {}
if field.null is not None:
kwargs["nullable"] = field.null
if field.primary_key:
kwargs["primary_key"] = field.primary_key
if field.default is not django_fields.NOT_PROVIDED:
kwargs["default"] = field.default
return kwargs
def iter_trans(self):
bn = self.bn
        field = self.field
cls = self.field.__class__
if cls in (
models.ManyToManyField,
models.OneToOneField,
models.ForeignKey):
for code in self.trans_related(field):
yield code
elif cls in (
models.CharField,
models.URLField,
models.EmailField,
models.SlugField):
yield bn(self.trans_char(field))
elif cls in (
models.IntegerField,
models.BigIntegerField,
models.SmallIntegerField,
models.PositiveSmallIntegerField,
models.PositiveIntegerField):
yield bn(self.trans_int(field))
elif cls in (models.FileField, models.FilePathField):
yield bn(self.trans_char(field))
elif cls == models.AutoField:
yield bn(CallCallable("Column", "Integer", **self.trans_args(field)))
elif cls == models.BooleanField:
yield bn(CallCallable("Column", "Boolean", **self.trans_args(field)))
elif cls == models.TextField:
yield bn(CallCallable("Column", "Text", **self.trans_args(field)))
        elif cls == models.DateTimeField:
            yield bn(CallCallable("Column", "DateTime", **self.trans_args(field)))
elif cls == models.TimeField:
yield bn(CallCallable("Column", "Time", **self.trans_args(field)))
elif cls == models.DateField:
yield bn(CallCallable("Column", "Data", **self.trans_args(field)))
else:
warnings.warn(self.field)
class Command(BaseCommand):
    _function_list = None  # save callback functions
BASE_CLASS_NAME = "base"
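    # Django 1.7 management commands still declare options via optparse
    # (option_list); newer versions use argparse through add_arguments().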
if django.VERSION[1] == 7:
from optparse import make_option
def _opt_callback(option, opt, value, parser, *args, **kwargs):
if not hasattr(parser.values, 'app'):
parser.values._update_loose({
"app": []
})
if isinstance(parser.values.app, list):
parser.values.app.append(value)
else:
parser.values._update_loose({
"app": [value]
})
option_list = BaseCommand.option_list + (
make_option(
'-p', '--path',
type=str,
nargs=1,
default=os.path.abspath(os.path.join(os.path.curdir, "sqlalchemy")),
                help='set output directory',
),
make_option(
'-a', '--app',
action="callback",
callback=_opt_callback,
default=[],
type=str,
help='select export app'
)
)
else:
def add_arguments(self, parser):
parser.add_argument(
'path',
type=str,
nargs='?',
default=os.path.abspath(os.path.join(os.path.curdir, "sqlalchemy"))
)
parser.add_argument('-a', '--app', nargs='*', type=str)
    help = 'Export Django models as SQLAlchemy model class definitions'
def handle(self, *args, **options):
        # Under Django 1.7 the optparse callback may leave 'app' as an empty list; normalize it to None.
        if not options['app']:
            options['app'] = None
self._function_list = []
self._many_to_many_tmp = []
self.load_models(options["app"])
@staticmethod
def app_str_to_app_config(app_id): # type: django.apps.config.AppConfig
"""
:param app_id: app label or app module __name__
:return: AppConfig
:rtype: django.apps.config.AppConfig
"""
if app_id in apps.app_configs:
return apps.app_configs[app_id]
else:
raise CommandError("can't find app `{}`".format(app_id))
def iter_models(self, apps=None):
if apps is None:
if django.VERSION[1] > 7:
from django.apps import apps as django_apps
iter_models = django_apps.get_models()
else:
from django.db import models
# include_auto_created parameter ensures that through tables implicitly
# created by ManyToManyFields will be retrieved as well.
iter_models = models.get_models(include_auto_created=True)
for model in iter_models:
yield model
else:
for app in apps:
for model in self.app_str_to_app_config(app).get_models():
yield model
def iter_fields(self, model):
fields_iter = iter(model._meta.concrete_fields)
for field in fields_iter:
yield field
def load_models(self, apps=None):
res = OrderedDict()
code_struct = []
for model in self.iter_models(apps):
code_struct.extend(self.model_to_sqlalchemy_class_code(model))
code_struct.extend(['', ''])
print ("\n".join(self.iter_format_code(code_struct)))
return ("\n".join(self.iter_format_code(code_struct)))
def field_to_sqlalchemy_class_code(self, field):
assert isinstance(field, models.Field)
tf = TransferField(field)
for code in tf.iter_trans():
yield code
def model_to_sqlalchemy_class_code(self, model):
sa_block = "class {class_name}({base_name}):".format(
class_name=model._meta.object_name,
base_name=self.BASE_CLASS_NAME
)
sa_fields = [
'"""',
'Auto transfer from django app `{}` by django_make_sqlalchemy'.format(model._meta.app_label),
            model.__doc__ if model.__doc__ else "",
'"""',
'__tablename__ = "{table_name}"'.format(table_name=model._meta.db_table),
]
for field in self.iter_fields(model):
for code in self.field_to_sqlalchemy_class_code(field):
sa_fields.append(code)
return [sa_block, sa_fields]
def iter_format_code(self, code, level=0):
for subcode in code:
if isinstance(subcode, list):
yield "\n".join(self.iter_format_code(subcode, level+1))
else:
indent = " " * level
if subcode:
yield ("{}{}".format(indent, subcode))
else:
yield ("")
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import functools
import json
import os
import random
import unittest
import warnings
from monty.json import MontyDecoder
from monty.os.path import which
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.sites import PeriodicSite
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.transformations.standard_transformations import *
from pymatgen.util.testing import PymatgenTest
enumlib_present = which("enum.x") and which("makestr.x")
class RotationTransformationsTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = Structure(lattice, ["Si"] * 2, coords)
def test_as_from_dict(self):
t = RotationTransformation([0, 1, 0], 30, False)
d = t.as_dict()
self.assertEqual(type(RotationTransformation.from_dict(d)), RotationTransformation)
def test_rotation_transformation(self):
t = RotationTransformation([0, 1, 0], 30, False)
s2 = t.apply_transformation(self.struct)
s1 = t.inverse.apply_transformation(s2)
self.assertTrue((abs(s1.lattice.matrix - self.struct.lattice.matrix) < 1e-8).all())
class RemoveSpeciesTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = RemoveSpeciesTransformation(["Li+"])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
s = t.apply_transformation(struct)
self.assertEqual(s.composition.formula, "O2")
d = t.as_dict()
self.assertEqual(type(RemoveSpeciesTransformation.from_dict(d)), RemoveSpeciesTransformation)
class SubstitutionTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = SubstitutionTransformation({"Li+": "Na+", "O2-": "S2-"})
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
s = t.apply_transformation(struct)
self.assertEqual(s.composition.formula, "Na2 S2")
def test_fractional_substitution(self):
t = SubstitutionTransformation({"Li+": "Na+", "O2-": {"S2-": 0.5, "Se2-": 0.5}})
# test the to and from dict on the nested dictionary
t = SubstitutionTransformation.from_dict(t.as_dict())
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
s = t.apply_transformation(struct)
self.assertEqual(s.composition.formula, "Na2 Se1 S1")
class SupercellTransformationTest(unittest.TestCase):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
def test_apply_transformation(self):
t = SupercellTransformation([[2, 1, 0], [0, 2, 0], [1, 0, 2]])
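        # The scaling matrix has determinant 8, so the 2 Li + 2 O cell grows to
        # 16 Li + 16 O.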
s = t.apply_transformation(self.struct)
self.assertEqual(s.composition.formula, "Li16 O16")
def test_from_scaling_factors(self):
scale_factors = [random.randint(1, 5) for i in range(3)]
t = SupercellTransformation.from_scaling_factors(*scale_factors)
s = t.apply_transformation(self.struct)
self.assertEqual(s.num_sites, 4 * functools.reduce(lambda a, b: a * b, scale_factors))
class OxidationStateDecorationTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = OxidationStateDecorationTransformation({"Li": 1, "O": -2})
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li", "Li", "O", "O"], coords)
s = t.apply_transformation(struct)
self.assertEqual(s[0].species_string, "Li+")
self.assertEqual(s[2].species_string, "O2-")
d = t.as_dict()
self.assertEqual(
type(OxidationStateDecorationTransformation.from_dict(d)),
OxidationStateDecorationTransformation,
)
class AutoOxiStateDecorationTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.LiFePO4"), check_for_POTCAR=False)
t = AutoOxiStateDecorationTransformation()
s = t.apply_transformation(p.structure)
expected_oxi = {"Li": 1, "P": 5, "O": -2, "Fe": 2}
for site in s:
self.assertEqual(site.specie.oxi_state, expected_oxi[site.specie.symbol])
def test_as_from_dict(self):
t = AutoOxiStateDecorationTransformation()
d = t.as_dict()
t = AutoOxiStateDecorationTransformation.from_dict(d)
self.assertEqual(t.analyzer.dist_scale_factor, 1.015)
class OxidationStateRemovalTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = OxidationStateRemovalTransformation()
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
s = t.apply_transformation(struct)
self.assertEqual(s[0].species_string, "Li")
self.assertEqual(s[2].species_string, "O")
d = t.as_dict()
self.assertEqual(
type(OxidationStateRemovalTransformation.from_dict(d)),
OxidationStateRemovalTransformation,
)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class PartialRemoveSpecieTransformationTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_apply_transformation(self):
t = PartialRemoveSpecieTransformation("Li+", 1.0 / 3, 3)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li+", "Li+", "Li+", "O2-"], coords)
self.assertEqual(len(t.apply_transformation(struct, 100)), 2)
d = t.as_dict()
self.assertEqual(
type(PartialRemoveSpecieTransformation.from_dict(d)),
PartialRemoveSpecieTransformation,
)
def test_apply_transformation_fast(self):
t = PartialRemoveSpecieTransformation("Li+", 0.5)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
coords.append([0.1, 0.1, 0.1])
coords.append([0.3, 0.75, 0.3])
lattice = Lattice([[10, 0.00, 0.00], [0, 10, 0.00], [0.00, 0, 10]])
struct = Structure(lattice, ["Li+"] * 6, coords)
fast_opt_s = t.apply_transformation(struct)
t = PartialRemoveSpecieTransformation("Li+", 0.5, PartialRemoveSpecieTransformation.ALGO_COMPLETE)
slow_opt_s = t.apply_transformation(struct)
self.assertAlmostEqual(
EwaldSummation(fast_opt_s).total_energy,
EwaldSummation(slow_opt_s).total_energy,
4,
)
self.assertEqual(fast_opt_s, slow_opt_s)
def test_apply_transformations_complete_ranking(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.LiFePO4"), check_for_POTCAR=False)
t1 = OxidationStateDecorationTransformation({"Li": 1, "Fe": 2, "P": 5, "O": -2})
s = t1.apply_transformation(p.structure)
t = PartialRemoveSpecieTransformation("Li+", 0.5, PartialRemoveSpecieTransformation.ALGO_COMPLETE)
self.assertEqual(len(t.apply_transformation(s, 10)), 6)
def test_apply_transformations_best_first(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.LiFePO4"), check_for_POTCAR=False)
t1 = OxidationStateDecorationTransformation({"Li": 1, "Fe": 2, "P": 5, "O": -2})
s = t1.apply_transformation(p.structure)
t = PartialRemoveSpecieTransformation("Li+", 0.5, PartialRemoveSpecieTransformation.ALGO_BEST_FIRST)
self.assertEqual(len(t.apply_transformation(s)), 26)
class OrderDisorderedStructureTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = OrderDisorderedStructureTransformation()
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(
lattice,
[
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
],
coords,
)
output = t.apply_transformation(struct, return_ranked_list=50)
self.assertEqual(len(output), 12)
self.assertIsInstance(output[0]["structure"], Structure)
struct = Structure(
lattice,
[
{"Si4+": 0.5},
{"Si4+": 0.5},
{"P5+": 0.5, "O2-": 0.5},
{"P5+": 0.5, "O2-": 0.5},
],
coords,
)
output = t.apply_transformation(struct, return_ranked_list=50)
self.assertIsInstance(output, list)
self.assertEqual(len(output), 4)
self.assertEqual(t.lowest_energy_structure, output[0]["structure"])
struct = Structure(lattice, [{"Si4+": 0.5}, {"Si4+": 0.5}, {"O2-": 0.5}, {"O2-": 0.5}], coords)
allstructs = t.apply_transformation(struct, 50)
self.assertEqual(len(allstructs), 4)
struct = Structure(lattice, [{"Si4+": 0.333}, {"Si4+": 0.333}, {"Si4+": 0.333}, "O2-"], coords)
allstructs = t.apply_transformation(struct, 50)
self.assertEqual(len(allstructs), 3)
d = t.as_dict()
self.assertEqual(
type(OrderDisorderedStructureTransformation.from_dict(d)),
OrderDisorderedStructureTransformation,
)
def test_no_oxidation(self):
specie = {"Cu1+": 0.5, "Au2+": 0.5}
cuau = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.677), [specie], [[0, 0, 0]])
trans = OrderDisorderedStructureTransformation()
ss = trans.apply_transformation(cuau, return_ranked_list=100)
self.assertEqual(ss[0]["structure"].composition["Cu+"], 2)
trans = OrderDisorderedStructureTransformation(no_oxi_states=True)
ss = trans.apply_transformation(cuau, return_ranked_list=100)
self.assertEqual(ss[0]["structure"].composition["Cu+"], 0)
self.assertEqual(ss[0]["structure"].composition["Cu"], 2)
def test_symmetrized_structure(self):
t = OrderDisorderedStructureTransformation(symmetrized_structures=True)
c = []
sp = []
c.append([0.5, 0.5, 0.5])
sp.append("Si4+")
c.append([0.45, 0.45, 0.45])
sp.append({"Si4+": 0.5})
c.append([0.56, 0.56, 0.56])
sp.append({"Si4+": 0.5})
c.append([0.25, 0.75, 0.75])
sp.append({"Si4+": 0.5})
c.append([0.75, 0.25, 0.25])
sp.append({"Si4+": 0.5})
l = Lattice.cubic(5)
s = Structure(l, sp, c)
test_site = PeriodicSite("Si4+", c[2], l)
s = SymmetrizedStructure(s, "not_real", [0, 1, 1, 2, 2], ["a", "b", "b", "c", "c"])
output = t.apply_transformation(s)
self.assertTrue(test_site in output.sites)
def test_too_small_cell(self):
t = OrderDisorderedStructureTransformation()
coords = list()
coords.append([0.5, 0.5, 0.5])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, [{"X4+": 0.33, "O2-": 0.33, "P5+": 0.33}], coords)
self.assertRaises(ValueError, t.apply_transformation, struct)
def test_best_first(self):
t = OrderDisorderedStructureTransformation(algo=2)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(
lattice,
[
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
{"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25},
],
coords,
)
output = t.apply_transformation(struct, return_ranked_list=3)
self.assertAlmostEqual(output[0]["energy"], -234.57813667648315, 4)
class PrimitiveCellTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = PrimitiveCellTransformation()
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
s = t.apply_transformation(struct)
self.assertEqual(len(s), 4)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_super.json")) as f:
s = json.load(f, cls=MontyDecoder)
prim = t.apply_transformation(s)
self.assertEqual(prim.formula, "Ti4 O8")
d = t.as_dict()
self.assertEqual(type(PrimitiveCellTransformation.from_dict(d)), PrimitiveCellTransformation)
class ConventionalCellTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = ConventionalCellTransformation()
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
conventional_struct = t.apply_transformation(struct)
self.assertEqual(conventional_struct.lattice.alpha, 90)
class PerturbStructureTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = PerturbStructureTransformation(0.05)
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = [
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
transformed_s = t.apply_transformation(struct)
for i, site in enumerate(transformed_s):
self.assertAlmostEqual(site.distance(struct[i]), 0.05)
d = t.as_dict()
self.assertEqual(
type(PerturbStructureTransformation.from_dict(d)),
PerturbStructureTransformation,
)
t2 = PerturbStructureTransformation(0.05, 0)
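        # With a minimum distance of 0, each site is displaced by a random
        # amount in [0, 0.05] rather than by exactly 0.05.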
transformed_s2 = t2.apply_transformation(struct)
for i, site in enumerate(transformed_s2):
self.assertLessEqual(site.distance(struct[i]), 0.05)
self.assertGreaterEqual(site.distance(struct[i]), 0)
d = t2.as_dict()
self.assertEqual(
type(PerturbStructureTransformation.from_dict(d)),
PerturbStructureTransformation,
)
class DeformStructureTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = DeformStructureTransformation([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.05, 1.0]])
coords = list()
coords.append([0, 0, 0])
coords.append([0.375, 0.375, 0.375])
coords.append([0.5, 0.5, 0.5])
coords.append([0.875, 0.875, 0.875])
coords.append([0.125, 0.125, 0.125])
coords.append([0.25, 0.25, 0.25])
coords.append([0.625, 0.625, 0.625])
coords.append([0.75, 0.75, 0.75])
lattice = [
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "O2-", "O2-", "O2-", "O2-"], coords)
transformed_s = t.apply_transformation(struct)
self.assertAlmostEqual(transformed_s.lattice.a, 3.84019793)
self.assertAlmostEqual(transformed_s.lattice.b, 3.84379750)
self.assertAlmostEqual(transformed_s.lattice.c, 3.75022981)
d = json.loads(json.dumps(t.as_dict()))
self.assertEqual(
type(DeformStructureTransformation.from_dict(d)),
DeformStructureTransformation,
)
class DiscretizeOccupanciesTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
l = Lattice.cubic(4)
s_orig = Structure(
l,
[{"Li": 0.19, "Na": 0.19, "K": 0.62}, {"O": 1}],
[[0, 0, 0], [0.5, 0.5, 0.5]],
)
dot = DiscretizeOccupanciesTransformation(max_denominator=5, tol=0.5)
s = dot.apply_transformation(s_orig)
self.assertEqual(
dict(s[0].species),
{Element("Li"): 0.2, Element("Na"): 0.2, Element("K"): 0.6},
)
dot = DiscretizeOccupanciesTransformation(max_denominator=5, tol=0.01)
self.assertRaises(RuntimeError, dot.apply_transformation, s_orig)
s_orig_2 = Structure(
l,
[{"Li": 0.5, "Na": 0.25, "K": 0.25}, {"O": 1}],
[[0, 0, 0], [0.5, 0.5, 0.5]],
)
dot = DiscretizeOccupanciesTransformation(max_denominator=9, tol=0.25, fix_denominator=False)
s = dot.apply_transformation(s_orig_2)
self.assertEqual(
dict(s[0].species),
{
Element("Li"): Fraction(1 / 2),
Element("Na"): Fraction(1 / 4),
Element("K"): Fraction(1 / 4),
},
)
dot = DiscretizeOccupanciesTransformation(max_denominator=9, tol=0.05, fix_denominator=True)
self.assertRaises(RuntimeError, dot.apply_transformation, s_orig_2)
class ChargedCellTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
l = Lattice.cubic(4)
s_orig = Structure(
l,
[{"Li": 0.19, "Na": 0.19, "K": 0.62}, {"O": 1}],
[[0, 0, 0], [0.5, 0.5, 0.5]],
)
cct = ChargedCellTransformation(charge=3)
s = cct.apply_transformation(s_orig)
self.assertEqual(s.charge, 3)
class ScaleToRelaxedTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
# Test on slab relaxation where volume is fixed
f = os.path.join(PymatgenTest.TEST_FILES_DIR, "surface_tests")
Cu_fin = Structure.from_file(os.path.join(f, "Cu_slab_fin.cif"))
Cu_init = Structure.from_file(os.path.join(f, "Cu_slab_init.cif"))
slab_scaling = ScaleToRelaxedTransformation(Cu_init, Cu_fin)
Au_init = Structure.from_file(os.path.join(f, "Au_slab_init.cif"))
Au_fin = slab_scaling.apply_transformation(Au_init)
self.assertAlmostEqual(Au_fin.lattice.volume, Au_init.lattice.volume)
# Test on gb relaxation
f = os.path.join(PymatgenTest.TEST_FILES_DIR, "grain_boundary")
Be_fin = Structure.from_file(os.path.join(f, "Be_gb_fin.cif"))
Be_init = Structure.from_file(os.path.join(f, "Be_gb_init.cif"))
Zn_init = Structure.from_file(os.path.join(f, "Zn_gb_init.cif"))
gb_scaling = ScaleToRelaxedTransformation(Be_init, Be_fin)
Zn_fin = gb_scaling.apply_transformation(Zn_init)
self.assertTrue(all([site.species_string == "Zn" for site in Zn_fin]))
self.assertEqual(Be_init.lattice.a < Be_fin.lattice.a, Zn_init.lattice.a < Zn_fin.lattice.a)
self.assertEqual(Be_init.lattice.b < Be_fin.lattice.b, Zn_init.lattice.b < Zn_fin.lattice.b)
self.assertEqual(Be_init.lattice.c < Be_fin.lattice.c, Zn_init.lattice.c < Zn_fin.lattice.c)
Fe_fin = Structure.from_file(os.path.join(f, "Fe_gb_fin.cif"))
Fe_init = Structure.from_file(os.path.join(f, "Fe_gb_init.cif"))
Mo_init = Structure.from_file(os.path.join(f, "Mo_gb_init.cif"))
gb_scaling = ScaleToRelaxedTransformation(Fe_init, Fe_fin)
Mo_fin = gb_scaling.apply_transformation(Mo_init)
self.assertTrue(all([site.species_string == "Mo" for site in Mo_fin]))
self.assertEqual(Fe_init.lattice.a < Fe_fin.lattice.a, Mo_init.lattice.a < Mo_fin.lattice.a)
self.assertEqual(Fe_init.lattice.b < Fe_fin.lattice.b, Mo_init.lattice.b < Mo_fin.lattice.b)
self.assertEqual(Fe_init.lattice.c < Fe_fin.lattice.c, Mo_init.lattice.c < Mo_fin.lattice.c)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
and support conversion to/from XML. These classes are solely concerned
with providing direct Object <-> XML document conversions. No policy or
operational decisions should be made by code in these classes. Such
policy belongs in the 'designer.py' module, which provides simplified
helpers for populating config object instances.
"""
import time
from lxml import etree
from oslo_log import log as logging
from oslo_utils import units
import six
from nova import exception
from nova.pci import utils as pci_utils
from nova.virt import hardware
LOG = logging.getLogger(__name__)
# Namespace to use for Nova specific metadata items in XML
NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0"
class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
self.root_name = kwargs.get("root_name")
self.ns_prefix = kwargs.get('ns_prefix')
self.ns_uri = kwargs.get('ns_uri')
def _new_node(self, name, **kwargs):
if self.ns_uri is None:
return etree.Element(name, **kwargs)
else:
return etree.Element("{" + self.ns_uri + "}" + name,
nsmap={self.ns_prefix: self.ns_uri},
**kwargs)
def _text_node(self, name, value, **kwargs):
child = self._new_node(name, **kwargs)
child.text = six.text_type(value)
return child
def format_dom(self):
return self._new_node(self.root_name)
def parse_str(self, xmlstr):
self.parse_dom(etree.fromstring(xmlstr))
def parse_dom(self, xmldoc):
if self.root_name != xmldoc.tag:
raise exception.InvalidInput(
"Root element name should be '%s' not '%s'"
% (self.root_name, xmldoc.tag))
def to_xml(self, pretty_print=True):
root = self.format_dom()
xml_str = etree.tostring(root, pretty_print=pretty_print)
LOG.debug("Generated XML %s ", (xml_str,))
return xml_str
class LibvirtConfigCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCaps, self).__init__(root_name="capabilities",
**kwargs)
self.host = None
self.guests = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCaps, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "host":
host = LibvirtConfigCapsHost()
host.parse_dom(c)
self.host = host
elif c.tag == "guest":
guest = LibvirtConfigCapsGuest()
guest.parse_dom(c)
self.guests.append(guest)
def format_dom(self):
caps = super(LibvirtConfigCaps, self).format_dom()
if self.host:
caps.append(self.host.format_dom())
for g in self.guests:
caps.append(g.format_dom())
return caps
class LibvirtConfigCapsNUMATopology(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMATopology, self).__init__(
root_name="topology",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc)
xmlcells = xmldoc.getchildren()[0]
for xmlcell in xmlcells.getchildren():
cell = LibvirtConfigCapsNUMACell()
cell.parse_dom(xmlcell)
self.cells.append(cell)
def format_dom(self):
topo = super(LibvirtConfigCapsNUMATopology, self).format_dom()
cells = etree.Element("cells")
cells.set("num", str(len(self.cells)))
topo.append(cells)
for cell in self.cells:
cells.append(cell.format_dom())
return topo
class LibvirtConfigCapsNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.memory = None
self.mempages = []
self.cpus = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
for c in xmldoc.getchildren():
if c.tag == "memory":
self.memory = int(c.text)
elif c.tag == "pages":
pages = LibvirtConfigCapsNUMAPages()
pages.parse_dom(c)
self.mempages.append(pages)
elif c.tag == "cpus":
for c2 in c.getchildren():
cpu = LibvirtConfigCapsNUMACPU()
cpu.parse_dom(c2)
self.cpus.append(cpu)
def format_dom(self):
cell = super(LibvirtConfigCapsNUMACell, self).format_dom()
cell.set("id", str(self.id))
mem = etree.Element("memory")
mem.set("unit", "KiB")
mem.text = str(self.memory)
cell.append(mem)
for pages in self.mempages:
cell.append(pages.format_dom())
cpus = etree.Element("cpus")
cpus.set("num", str(len(self.cpus)))
for cpu in self.cpus:
cpus.append(cpu.format_dom())
cell.append(cpus)
return cell
class LibvirtConfigCapsNUMACPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu",
**kwargs)
self.id = None
self.socket_id = None
self.core_id = None
self.siblings = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
if xmldoc.get("socket_id") is not None:
self.socket_id = int(xmldoc.get("socket_id"))
if xmldoc.get("core_id") is not None:
self.core_id = int(xmldoc.get("core_id"))
if xmldoc.get("siblings") is not None:
self.siblings = hardware.parse_cpu_spec(
xmldoc.get("siblings"))
def format_dom(self):
cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom()
cpu.set("id", str(self.id))
if self.socket_id is not None:
cpu.set("socket_id", str(self.socket_id))
if self.core_id is not None:
cpu.set("core_id", str(self.core_id))
if self.siblings is not None:
cpu.set("siblings",
hardware.format_cpu_spec(self.siblings))
return cpu
class LibvirtConfigCapsNUMAPages(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMAPages, self).__init__(
root_name="pages", **kwargs)
self.size = None
self.total = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMAPages, self).parse_dom(xmldoc)
self.size = int(xmldoc.get("size"))
self.total = int(xmldoc.text)
def format_dom(self):
pages = super(LibvirtConfigCapsNUMAPages, self).format_dom()
pages.text = str(self.total)
pages.set("size", str(self.size))
pages.set("unit", "KiB")
return pages
class LibvirtConfigCapsHost(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsHost, self).__init__(root_name="host",
**kwargs)
self.cpu = None
self.uuid = None
self.topology = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "cpu":
cpu = LibvirtConfigCPU()
cpu.parse_dom(c)
self.cpu = cpu
elif c.tag == "uuid":
self.uuid = c.text
elif c.tag == "topology":
self.topology = LibvirtConfigCapsNUMATopology()
self.topology.parse_dom(c)
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
if self.uuid:
caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
if self.topology:
caps.append(self.topology.format_dom())
return caps
class LibvirtConfigCapsGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuest, self).__init__(root_name="guest",
**kwargs)
self.arch = None
self.ostype = None
self.domtype = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "os_type":
self.ostype = c.text
elif c.tag == "arch":
self.arch = c.get("name")
for sc in c.getchildren():
if sc.tag == "domain":
self.domtype.append(sc.get("type"))
def format_dom(self):
caps = super(LibvirtConfigCapsGuest, self).format_dom()
if self.ostype is not None:
caps.append(self._text_node("os_type", self.ostype))
if self.arch:
arch = etree.Element("arch", name=self.arch)
for dt in self.domtype:
dte = etree.Element("domain")
dte.set("type", dt)
arch.append(dte)
caps.append(arch)
return caps
class LibvirtConfigGuestTimer(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestTimer, self).__init__(root_name="timer",
**kwargs)
self.name = "platform"
self.track = None
self.tickpolicy = None
self.present = None
def format_dom(self):
tm = super(LibvirtConfigGuestTimer, self).format_dom()
tm.set("name", self.name)
if self.track is not None:
tm.set("track", self.track)
if self.tickpolicy is not None:
tm.set("tickpolicy", self.tickpolicy)
if self.present is not None:
if self.present:
tm.set("present", "yes")
else:
tm.set("present", "no")
return tm
class LibvirtConfigGuestClock(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestClock, self).__init__(root_name="clock",
**kwargs)
self.offset = "utc"
self.adjustment = None
self.timezone = None
self.timers = []
def format_dom(self):
clk = super(LibvirtConfigGuestClock, self).format_dom()
clk.set("offset", self.offset)
if self.adjustment:
clk.set("adjustment", self.adjustment)
elif self.timezone:
clk.set("timezone", self.timezone)
for tm in self.timers:
clk.append(tm.format_dom())
return clk
def add_timer(self, tm):
self.timers.append(tm)
class LibvirtConfigCPUFeature(LibvirtConfigObject):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigCPUFeature, self).__init__(root_name='feature',
**kwargs)
self.name = name
def parse_dom(self, xmldoc):
super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc)
self.name = xmldoc.get("name")
def format_dom(self):
ft = super(LibvirtConfigCPUFeature, self).format_dom()
ft.set("name", self.name)
return ft
def __eq__(self, obj):
return obj.name == self.name
def __ne__(self, obj):
return obj.name != self.name
def __hash__(self):
return hash(self.name)
class LibvirtConfigCPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCPU, self).__init__(root_name='cpu',
**kwargs)
self.arch = None
self.vendor = None
self.model = None
self.sockets = None
self.cores = None
self.threads = None
self.features = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigCPU, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "arch":
self.arch = c.text
elif c.tag == "model":
self.model = c.text
elif c.tag == "vendor":
self.vendor = c.text
elif c.tag == "topology":
self.sockets = int(c.get("sockets"))
self.cores = int(c.get("cores"))
self.threads = int(c.get("threads"))
elif c.tag == "feature":
f = LibvirtConfigCPUFeature()
f.parse_dom(c)
self.add_feature(f)
def format_dom(self):
cpu = super(LibvirtConfigCPU, self).format_dom()
if self.arch is not None:
cpu.append(self._text_node("arch", self.arch))
if self.model is not None:
cpu.append(self._text_node("model", self.model))
if self.vendor is not None:
cpu.append(self._text_node("vendor", self.vendor))
if (self.sockets is not None and
self.cores is not None and
self.threads is not None):
top = etree.Element("topology")
top.set("sockets", str(self.sockets))
top.set("cores", str(self.cores))
top.set("threads", str(self.threads))
cpu.append(top)
# sorting the features to allow more predictable tests
for f in sorted(self.features, key=lambda x: x.name):
cpu.append(f.format_dom())
return cpu
def add_feature(self, feat):
self.features.add(feat)
class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs)
self.policy = "require"
def format_dom(self):
ft = super(LibvirtConfigGuestCPUFeature, self).format_dom()
ft.set("policy", self.policy)
return ft
class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.cpus = None
self.memory = None
def parse_dom(self, xmldoc):
if xmldoc.get("id") is not None:
self.id = int(xmldoc.get("id"))
if xmldoc.get("memory") is not None:
self.memory = int(xmldoc.get("memory"))
if xmldoc.get("cpus") is not None:
self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus"))
def format_dom(self):
cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom()
if self.id is not None:
cell.set("id", str(self.id))
if self.cpus is not None:
cell.set("cpus",
hardware.format_cpu_spec(self.cpus))
if self.memory is not None:
cell.set("memory", str(self.memory))
return cell
class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc)
for child in xmldoc.getchildren():
if child.tag == "cell":
cell = LibvirtConfigGuestCPUNUMACell()
cell.parse_dom(child)
self.cells.append(cell)
def format_dom(self):
numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom()
for cell in self.cells:
numa.append(cell.format_dom())
return numa
class LibvirtConfigGuestCPU(LibvirtConfigCPU):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPU, self).__init__(**kwargs)
self.mode = None
self.match = "exact"
self.numa = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.match = xmldoc.get('match')
for child in xmldoc.getchildren():
if child.tag == "numa":
numa = LibvirtConfigGuestCPUNUMA()
numa.parse_dom(child)
self.numa = numa
def format_dom(self):
cpu = super(LibvirtConfigGuestCPU, self).format_dom()
if self.mode:
cpu.set("mode", self.mode)
cpu.set("match", self.match)
if self.numa is not None:
cpu.append(self.numa.format_dom())
return cpu
class LibvirtConfigGuestSMBIOS(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios",
**kwargs)
self.mode = "sysinfo"
def format_dom(self):
smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom()
smbios.set("mode", self.mode)
return smbios
class LibvirtConfigGuestSysinfo(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo",
**kwargs)
self.type = "smbios"
self.bios_vendor = None
self.bios_version = None
self.system_manufacturer = None
self.system_product = None
self.system_version = None
self.system_serial = None
self.system_uuid = None
def format_dom(self):
sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom()
sysinfo.set("type", self.type)
bios = None
system = None
if self.bios_vendor is not None:
if bios is None:
bios = etree.Element("bios")
info = etree.Element("entry", name="vendor")
info.text = self.bios_vendor
bios.append(info)
if self.bios_version is not None:
if bios is None:
bios = etree.Element("bios")
info = etree.Element("entry", name="version")
info.text = self.bios_version
bios.append(info)
if self.system_manufacturer is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="manufacturer")
info.text = self.system_manufacturer
system.append(info)
if self.system_product is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="product")
info.text = self.system_product
system.append(info)
if self.system_version is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="version")
info.text = self.system_version
system.append(info)
if self.system_serial is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="serial")
info.text = self.system_serial
system.append(info)
if self.system_uuid is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="uuid")
info.text = self.system_uuid
system.append(info)
if bios is not None:
sysinfo.append(bios)
if system is not None:
sysinfo.append(system)
return sysinfo
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDevice, self).__init__(**kwargs)
class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = "file"
self.source_device = "disk"
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.driver_discard = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
self.disk_read_bytes_sec = None
self.disk_read_iops_sec = None
self.disk_write_bytes_sec = None
self.disk_write_iops_sec = None
self.disk_total_bytes_sec = None
self.disk_total_iops_sec = None
self.logical_block_size = None
self.physical_block_size = None
self.readonly = False
self.snapshot = None
self.backing_store = None
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
dev.set("type", self.source_type)
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None or
self.driver_discard is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
if self.driver_discard is not None:
drv.set("discard", self.driver_discard)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
if self.serial is not None:
dev.append(self._text_node("serial", self.serial))
iotune = etree.Element("iotune")
if self.disk_read_bytes_sec is not None:
iotune.append(self._text_node("read_bytes_sec",
self.disk_read_bytes_sec))
if self.disk_read_iops_sec is not None:
iotune.append(self._text_node("read_iops_sec",
self.disk_read_iops_sec))
if self.disk_write_bytes_sec is not None:
iotune.append(self._text_node("write_bytes_sec",
self.disk_write_bytes_sec))
if self.disk_write_iops_sec is not None:
iotune.append(self._text_node("write_iops_sec",
self.disk_write_iops_sec))
if self.disk_total_bytes_sec is not None:
iotune.append(self._text_node("total_bytes_sec",
self.disk_total_bytes_sec))
if self.disk_total_iops_sec is not None:
iotune.append(self._text_node("total_iops_sec",
self.disk_total_iops_sec))
if len(iotune) > 0:
dev.append(iotune)
# Block size tuning
if (self.logical_block_size is not None or
self.physical_block_size is not None):
blockio = etree.Element("blockio")
if self.logical_block_size is not None:
blockio.set('logical_block_size', self.logical_block_size)
if self.physical_block_size is not None:
blockio.set('physical_block_size', self.physical_block_size)
dev.append(blockio)
if self.readonly:
dev.append(etree.Element("readonly"))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
self.driver_discard = c.get('discard')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c.getchildren():
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
elif c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
elif c.tag == 'backingStore':
b = LibvirtConfigGuestDiskBackingStore()
b.parse_dom(c)
self.backing_store = b
class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskBackingStore, self).__init__(
root_name="backingStore", **kwargs)
self.index = None
self.source_type = None
self.source_file = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.driver_name = None
self.driver_format = None
self.backing_store = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.index = xmldoc.get('index')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
elif c.tag == 'source':
self.source_file = c.get('file')
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for d in c.getchildren():
if d.tag == 'host':
self.source_hosts.append(d.get('name'))
self.source_ports.append(d.get('port'))
elif c.tag == 'backingStore':
if c.getchildren():
self.backing_store = LibvirtConfigGuestDiskBackingStore()
self.backing_store.parse_dom(c)
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
"""Disk class for handling disk information in snapshots.
Similar to LibvirtConfigGuestDisk, but used to represent
disk entities in <domainsnapshot> structures rather than
real devices. These typically have fewer members, and
different expectations for which fields are required.
"""
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = None
self.source_device = None
self.name = None
self.snapshot = None
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
def format_dom(self):
dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom()
if self.name:
dev.attrib['name'] = self.name
if self.snapshot:
dev.attrib['snapshot'] = self.snapshot
if self.source_type:
dev.set("type", self.source_type)
if self.source_device:
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
if self.target_bus and self.target_dev:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c.getchildren():
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
for c in xmldoc.getchildren():
if c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem",
**kwargs)
self.source_type = "mount"
self.source_dir = None
self.source_file = None
self.source_dev = None
self.target_dir = "/"
self.driver_type = "loop"
self.driver_format = "raw"
def format_dom(self):
dev = super(LibvirtConfigGuestFilesys, self).format_dom()
dev.set("type", self.source_type)
if self.source_type == "file":
dev.append(etree.Element("driver", type = self.driver_type,
format = self.driver_format))
dev.append(etree.Element("source", file=self.source_file))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_dev))
else:
dev.append(etree.Element("source", dir=self.source_dir))
dev.append(etree.Element("target", dir=self.target_dir))
return dev
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
self.count = 10000
def parse_dom(self, xmldoc):
self.start = int(xmldoc.get('start'))
self.target = int(xmldoc.get('target'))
self.count = int(xmldoc.get('count'))
def format_dom(self):
obj = super(LibvirtConfigGuestIDMap, self).format_dom()
obj.set("start", str(self.start))
obj.set("target", str(self.target))
obj.set("count", str(self.count))
return obj
class LibvirtConfigGuestUIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUIDMap, self).__init__(root_name="uid",
**kwargs)
class LibvirtConfigGuestGIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGIDMap, self).__init__(root_name="gid",
**kwargs)
class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInterface, self).__init__(
root_name="interface",
**kwargs)
self.net_type = None
self.target_dev = None
self.model = None
self.mac_addr = None
self.script = None
self.source_dev = None
self.source_mode = "private"
self.vporttype = None
self.vportparams = []
self.filtername = None
self.filterparams = []
self.driver_name = None
self.vhostuser_mode = None
self.vhostuser_path = None
self.vhostuser_type = None
self.vif_inbound_peak = None
self.vif_inbound_burst = None
self.vif_inbound_average = None
self.vif_outbound_peak = None
self.vif_outbound_burst = None
self.vif_outbound_average = None
self.vlan = None
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
dev.set("type", self.net_type)
if self.net_type == "hostdev":
dev.set("managed", "yes")
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
if self.driver_name:
dev.append(etree.Element("driver", name=self.driver_name))
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
elif self.net_type == "hostdev":
source_elem = etree.Element("source")
domain, bus, slot, func = \
pci_utils.get_pci_address_fields(self.source_dev)
addr_elem = etree.Element("address", type='pci')
addr_elem.set("domain", "0x%s" % (domain))
addr_elem.set("bus", "0x%s" % (bus))
addr_elem.set("slot", "0x%s" % (slot))
addr_elem.set("function", "0x%s" % (func))
source_elem.append(addr_elem)
dev.append(source_elem)
elif self.net_type == "vhostuser":
dev.append(etree.Element("source", type=self.vhostuser_type,
mode=self.vhostuser_mode,
path=self.vhostuser_path))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
if self.vlan and self.net_type in ("direct", "hostdev"):
vlan_elem = etree.Element("vlan")
tag_elem = etree.Element("tag", id=self.vlan)
vlan_elem.append(tag_elem)
dev.append(vlan_elem)
if self.target_dev is not None:
dev.append(etree.Element("target", dev=self.target_dev))
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
param = etree.Element("parameters")
param.set(p['key'], p['value'])
vport.append(param)
dev.append(vport)
if self.filtername is not None:
filter = etree.Element("filterref", filter=self.filtername)
for p in self.filterparams:
filter.append(etree.Element("parameter",
name=p['key'],
value=p['value']))
dev.append(filter)
if self.vif_inbound_average or self.vif_outbound_average:
bandwidth = etree.Element("bandwidth")
if self.vif_inbound_average is not None:
vif_inbound = etree.Element("inbound",
average=str(self.vif_inbound_average))
if self.vif_inbound_peak is not None:
vif_inbound.set("peak", str(self.vif_inbound_peak))
if self.vif_inbound_burst is not None:
vif_inbound.set("burst", str(self.vif_inbound_burst))
bandwidth.append(vif_inbound)
if self.vif_outbound_average is not None:
vif_outbound = etree.Element("outbound",
average=str(self.vif_outbound_average))
if self.vif_outbound_peak is not None:
vif_outbound.set("peak", str(self.vif_outbound_peak))
if self.vif_outbound_burst is not None:
vif_outbound.set("burst", str(self.vif_outbound_burst))
bandwidth.append(vif_outbound)
dev.append(bandwidth)
return dev
def add_filter_param(self, key, value):
self.filterparams.append({'key': key, 'value': value})
def add_vport_param(self, key, value):
self.vportparams.append({'key': key, 'value': value})
class LibvirtConfigGuestInput(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInput, self).__init__(root_name="input",
**kwargs)
self.type = "tablet"
self.bus = "usb"
def format_dom(self):
dev = super(LibvirtConfigGuestInput, self).format_dom()
dev.set("type", self.type)
dev.set("bus", self.bus)
return dev
class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics",
**kwargs)
self.type = "vnc"
self.autoport = True
self.keymap = None
self.listen = None
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
dev.set("type", self.type)
if self.autoport:
dev.set("autoport", "yes")
else:
dev.set("autoport", "no")
if self.keymap:
dev.set("keymap", self.keymap)
if self.listen:
dev.set("listen", self.listen)
return dev
class LibvirtConfigSeclabel(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel",
**kwargs)
self.type = 'dynamic'
self.baselabel = None
def format_dom(self):
seclabel = super(LibvirtConfigSeclabel, self).format_dom()
seclabel.set('type', self.type)
if self.baselabel:
seclabel.append(self._text_node("baselabel", self.baselabel))
return seclabel
class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVideo, self).__init__(root_name="video",
**kwargs)
self.type = 'cirrus'
self.vram = None
self.heads = None
def format_dom(self):
dev = super(LibvirtConfigGuestVideo, self).format_dom()
model = etree.Element("model")
model.set("type", self.type)
if self.vram:
model.set("vram", str(self.vram))
if self.heads:
model.set("heads", str(self.heads))
dev.append(model)
return dev
class LibvirtConfigMemoryBalloon(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigMemoryBalloon, self).__init__(
root_name='memballoon',
**kwargs)
self.model = None
self.period = None
def format_dom(self):
dev = super(LibvirtConfigMemoryBalloon, self).format_dom()
dev.set('model', str(self.model))
if self.period is not None:
dev.append(etree.Element('stats', period=str(self.period)))
return dev
class LibvirtConfigGuestController(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestController,
self).__init__(root_name="controller", **kwargs)
self.type = None
self.index = None
self.model = None
def format_dom(self):
controller = super(LibvirtConfigGuestController, self).format_dom()
controller.set("type", self.type)
if self.index is not None:
controller.set("index", str(self.index))
if self.model:
controller.set("model", str(self.model))
return controller
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdev, self).\
__init__(root_name="hostdev", **kwargs)
self.mode = kwargs.get('mode')
self.type = kwargs.get('type')
self.managed = 'yes'
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
dev.set("mode", self.mode)
dev.set("type", self.type)
dev.set("managed", self.managed)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.type = xmldoc.get('type')
self.managed = xmldoc.get('managed')
return xmldoc.getchildren()
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
__init__(mode='subsystem', type='pci',
**kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom()
address = etree.Element("address",
domain='0x' + self.domain,
bus='0x' + self.bus,
slot='0x' + self.slot,
function='0x' + self.function)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc)
for c in childs:
if c.tag == "source":
for sub in c.getchildren():
if sub.tag == 'address':
self.domain = sub.get('domain')
self.bus = sub.get('bus')
self.slot = sub.get('slot')
self.function = sub.get('function')
class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharBase, self).__init__(**kwargs)
self.type = "pty"
self.source_path = None
self.listen_port = None
self.listen_host = None
def format_dom(self):
dev = super(LibvirtConfigGuestCharBase, self).format_dom()
dev.set("type", self.type)
if self.type == "file":
dev.append(etree.Element("source", path=self.source_path))
elif self.type == "unix":
dev.append(etree.Element("source", mode="bind",
path=self.source_path))
elif self.type == "tcp":
dev.append(etree.Element("source", mode="bind",
host=self.listen_host,
service=str(self.listen_port)))
return dev
class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChar, self).__init__(**kwargs)
self.target_port = None
self.target_type = None
def format_dom(self):
dev = super(LibvirtConfigGuestChar, self).format_dom()
if self.target_port is not None or self.target_type is not None:
target = etree.Element("target")
if self.target_port is not None:
target.set("port", str(self.target_port))
if self.target_type is not None:
target.set("type", self.target_type)
dev.append(target)
return dev
class LibvirtConfigGuestSerial(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSerial, self).__init__(root_name="serial",
**kwargs)
class LibvirtConfigGuestConsole(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestConsole, self).__init__(root_name="console",
**kwargs)
class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChannel, self).__init__(root_name="channel",
**kwargs)
self.target_type = "virtio"
self.target_name = None
def format_dom(self):
dev = super(LibvirtConfigGuestChannel, self).format_dom()
target = etree.Element("target", type=self.target_type)
if self.target_name is not None:
target.set("name", self.target_name)
dev.append(target)
return dev
class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog",
**kwargs)
self.model = 'i6300esb'
self.action = 'reset'
def format_dom(self):
dev = super(LibvirtConfigGuestWatchdog, self).format_dom()
dev.set('model', self.model)
dev.set('action', self.action)
return dev
class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__(
root_name="vcpupin",
**kwargs)
self.id = None
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom()
root.set("vcpu", str(self.id))
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneEmulatorPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneEmulatorPin, self).__init__(
root_name="emulatorpin",
**kwargs)
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneEmulatorPin, self).format_dom()
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTune, self).__init__(root_name="cputune",
**kwargs)
self.shares = None
self.quota = None
self.period = None
self.vcpupin = []
self.emulatorpin = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTune, self).format_dom()
if self.shares is not None:
root.append(self._text_node("shares", str(self.shares)))
if self.quota is not None:
root.append(self._text_node("quota", str(self.quota)))
if self.period is not None:
root.append(self._text_node("period", str(self.period)))
if self.emulatorpin is not None:
root.append(self.emulatorpin.format_dom())
for vcpu in self.vcpupin:
root.append(vcpu.format_dom())
return root
class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBacking, self).__init__(
root_name="memoryBacking", **kwargs)
self.hugepages = []
self.sharedpages = True
self.locked = False
def format_dom(self):
root = super(LibvirtConfigGuestMemoryBacking, self).format_dom()
if self.hugepages:
hugepages = etree.Element("hugepages")
for item in self.hugepages:
hugepages.append(item.format_dom())
root.append(hugepages)
if not self.sharedpages:
root.append(etree.Element("nosharedpages"))
if self.locked:
root.append(etree.Element("locked"))
return root
class LibvirtConfigGuestMemoryBackingPage(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBackingPage, self).__init__(
root_name="page", **kwargs)
self.size_kb = None
self.nodeset = None
def format_dom(self):
page = super(LibvirtConfigGuestMemoryBackingPage, self).format_dom()
page.set("size", str(self.size_kb))
page.set("nodeset", hardware.format_cpu_spec(self.nodeset))
page.set("unit", "KiB")
return page
class LibvirtConfigGuestMemoryTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryTune, self).__init__(
root_name="memtune", **kwargs)
self.hard_limit = None
self.soft_limit = None
self.swap_hard_limit = None
self.min_guarantee = None
def format_dom(self):
root = super(LibvirtConfigGuestMemoryTune, self).format_dom()
if self.hard_limit is not None:
root.append(self._text_node("hard_limit",
str(self.hard_limit),
units="K"))
if self.soft_limit is not None:
root.append(self._text_node("soft_limit",
str(self.soft_limit),
units="K"))
if self.swap_hard_limit is not None:
root.append(self._text_node("swap_hard_limit",
str(self.swap_hard_limit),
units="K"))
if self.min_guarantee is not None:
root.append(self._text_node("min_guarantee",
str(self.min_guarantee),
units="K"))
return root
class LibvirtConfigGuestNUMATuneMemory(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemory, self).__init__(
root_name="memory", **kwargs)
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemory, self).format_dom()
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATuneMemNode(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemNode, self).__init__(
root_name="memnode", **kwargs)
self.cellid = 0
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemNode, self).format_dom()
root.set("cellid", str(self.cellid))
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATune, self).__init__(
root_name="numatune", **kwargs)
self.memory = None
self.memnodes = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATune, self).format_dom()
if self.memory is not None:
root.append(self.memory.format_dom())
for node in self.memnodes:
root.append(node.format_dom())
return root
class LibvirtConfigGuestFeature(LibvirtConfigObject):
def __init__(self, name, **kwargs):
super(LibvirtConfigGuestFeature, self).__init__(root_name=name,
**kwargs)
class LibvirtConfigGuestFeatureACPI(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureACPI, self).__init__("acpi",
**kwargs)
class LibvirtConfigGuestFeatureAPIC(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureAPIC, self).__init__("apic",
**kwargs)
class LibvirtConfigGuestFeaturePAE(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeaturePAE, self).__init__("pae",
**kwargs)
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
# QEMU requires at least this value to be set
MIN_SPINLOCK_RETRIES = 4095
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureHyperV, self).__init__("hyperv",
**kwargs)
self.relaxed = False
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
def format_dom(self):
root = super(LibvirtConfigGuestFeatureHyperV, self).format_dom()
if self.relaxed:
root.append(etree.Element("relaxed", state="on"))
if self.vapic:
root.append(etree.Element("vapic", state="on"))
if self.spinlocks:
root.append(etree.Element("spinlocks", state="on",
retries=str(self.spinlock_retries)))
return root
class LibvirtConfigGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuest, self).__init__(root_name="domain",
**kwargs)
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 500 * units.Mi
self.membacking = None
self.memtune = None
self.numatune = None
self.vcpus = 1
self.cpuset = None
self.cpu = None
self.cputune = None
self.features = []
self.clock = None
self.sysinfo = None
self.os_type = None
self.os_loader = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
self.os_root = None
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
self.os_mach_type = None
self.os_bootmenu = False
self.devices = []
self.metadata = []
self.idmaps = []
def _format_basic_props(self, root):
root.append(self._text_node("uuid", self.uuid))
root.append(self._text_node("name", self.name))
root.append(self._text_node("memory", self.memory))
if self.membacking is not None:
root.append(self.membacking.format_dom())
if self.memtune is not None:
root.append(self.memtune.format_dom())
if self.numatune is not None:
root.append(self.numatune.format_dom())
if self.cpuset is not None:
vcpu = self._text_node("vcpu", self.vcpus)
vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
root.append(vcpu)
else:
root.append(self._text_node("vcpu", self.vcpus))
if len(self.metadata) > 0:
metadata = etree.Element("metadata")
for m in self.metadata:
metadata.append(m.format_dom())
root.append(metadata)
def _format_os(self, root):
os = etree.Element("os")
type_node = self._text_node("type", self.os_type)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
if self.os_loader is not None:
os.append(self._text_node("loader", self.os_loader))
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
os.append(self._text_node("cmdline", self.os_cmdline))
if self.os_root is not None:
os.append(self._text_node("root", self.os_root))
if self.os_init_path is not None:
os.append(self._text_node("init", self.os_init_path))
for boot_dev in self.os_boot_dev:
os.append(etree.Element("boot", dev=boot_dev))
if self.os_smbios is not None:
os.append(self.os_smbios.format_dom())
if self.os_bootmenu:
os.append(etree.Element("bootmenu", enable="yes"))
root.append(os)
def _format_features(self, root):
if len(self.features) > 0:
features = etree.Element("features")
for feat in self.features:
features.append(feat.format_dom())
root.append(features)
def _format_devices(self, root):
if len(self.devices) == 0:
return
devices = etree.Element("devices")
for dev in self.devices:
devices.append(dev.format_dom())
root.append(devices)
def _format_idmaps(self, root):
if len(self.idmaps) == 0:
return
idmaps = etree.Element("idmap")
for idmap in self.idmaps:
idmaps.append(idmap.format_dom())
root.append(idmaps)
def format_dom(self):
root = super(LibvirtConfigGuest, self).format_dom()
root.set("type", self.virt_type)
self._format_basic_props(root)
if self.sysinfo is not None:
root.append(self.sysinfo.format_dom())
self._format_os(root)
self._format_features(root)
if self.cputune is not None:
root.append(self.cputune.format_dom())
if self.clock is not None:
root.append(self.clock.format_dom())
if self.cpu is not None:
root.append(self.cpu.format_dom())
self._format_devices(root)
self._format_idmaps(root)
return root
def parse_dom(self, xmldoc):
        # Note: This covers only: LibvirtConfigGuestDisk
        #                         LibvirtConfigGuestHostdevPCI
        #                         LibvirtConfigGuestUIDMap
        #                         LibvirtConfigGuestGIDMap
        #                         LibvirtConfigGuestCPU
for c in xmldoc.getchildren():
if c.tag == 'devices':
for d in c.getchildren():
if d.tag == 'disk':
obj = LibvirtConfigGuestDisk()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'pci':
obj = LibvirtConfigGuestHostdevPCI()
obj.parse_dom(d)
self.devices.append(obj)
if c.tag == 'idmap':
for map in c.getchildren():
obj = None
if map.tag == 'uid':
obj = LibvirtConfigGuestUIDMap()
elif map.tag == 'gid':
obj = LibvirtConfigGuestGIDMap()
if obj:
obj.parse_dom(map)
self.idmaps.append(obj)
elif c.tag == 'cpu':
obj = LibvirtConfigGuestCPU()
obj.parse_dom(c)
self.cpu = obj
def add_device(self, dev):
self.devices.append(dev)
def set_clock(self, clk):
self.clock = clk
class LibvirtConfigGuestSnapshot(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshot, self).__init__(
root_name="domainsnapshot",
**kwargs)
self.name = None
self.disks = []
def format_dom(self):
ss = super(LibvirtConfigGuestSnapshot, self).format_dom()
if self.name:
ss.append(self._text_node("name", self.name))
disks = etree.Element('disks')
for disk in self.disks:
disks.append(disk.format_dom())
ss.append(disks)
return ss
def add_disk(self, disk):
self.disks.append(disk)
class LibvirtConfigNodeDevice(LibvirtConfigObject):
"""Libvirt Node Devices parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevice, self).__init__(root_name="device",
**kwargs)
self.name = None
self.parent = None
self.driver = None
self.pci_capability = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "name":
self.name = c.text
elif c.tag == "parent":
self.parent = c.text
elif c.tag == "capability" and c.get("type") == 'pci':
pcicap = LibvirtConfigNodeDevicePciCap()
pcicap.parse_dom(c)
self.pci_capability = pcicap
class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
"""Libvirt Node Devices pci capability parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciCap, self).__init__(
root_name="capability", **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
self.product = None
self.product_id = None
self.vendor = None
self.vendor_id = None
self.numa_node = None
self.fun_capability = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "domain":
self.domain = int(c.text)
elif c.tag == "slot":
self.slot = int(c.text)
elif c.tag == "bus":
self.bus = int(c.text)
elif c.tag == "function":
self.function = int(c.text)
elif c.tag == "product":
self.product = c.text
self.product_id = int(c.get('id'), 16)
elif c.tag == "vendor":
self.vendor = c.text
self.vendor_id = int(c.get('id'), 16)
elif c.tag == "numa":
self.numa_node = int(c.get('node'))
elif c.tag == "capability" and c.get('type') in \
('virt_functions', 'phys_function'):
funcap = LibvirtConfigNodeDevicePciSubFunctionCap()
funcap.parse_dom(c)
self.fun_capability.append(funcap)
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.device_addrs = list() # list of tuple (domain,bus,slot,function)
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc)
self.type = xmldoc.get("type")
for c in xmldoc.getchildren():
if c.tag == "address":
self.device_addrs.append((int(c.get('domain'), 16),
int(c.get('bus'), 16),
int(c.get('slot'), 16),
int(c.get('function'), 16)))
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
**kwargs)
self.model = 'random'
self.backend = None
self.rate_period = None
self.rate_bytes = None
def format_dom(self):
dev = super(LibvirtConfigGuestRng, self).format_dom()
dev.set('model', 'virtio')
backend = etree.Element("backend")
backend.set("model", self.model)
backend.text = self.backend
if self.rate_period and self.rate_bytes:
rate = etree.Element("rate")
rate.set("period", str(self.rate_period))
rate.set("bytes", str(self.rate_bytes))
dev.append(rate)
dev.append(backend)
return dev
class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaInstance,
self).__init__(root_name="instance",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.package = None
self.flavor = None
self.name = None
self.creationTime = None
self.owner = None
self.roottype = None
self.rootid = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()
pkg = self._new_node("package")
pkg.set("version", self.package)
meta.append(pkg)
if self.name is not None:
meta.append(self._text_node("name", self.name))
if self.creationTime is not None:
timestr = time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(self.creationTime))
meta.append(self._text_node("creationTime", timestr))
if self.flavor is not None:
meta.append(self.flavor.format_dom())
if self.owner is not None:
meta.append(self.owner.format_dom())
if self.roottype is not None and self.rootid is not None:
root = self._new_node("root")
root.set("type", self.roottype)
root.set("uuid", str(self.rootid))
meta.append(root)
return meta
class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaFlavor,
self).__init__(root_name="flavor",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.name = None
self.memory = None
self.disk = None
self.swap = None
self.ephemeral = None
self.vcpus = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
meta.set("name", self.name)
if self.memory is not None:
meta.append(self._text_node("memory", str(self.memory)))
if self.disk is not None:
meta.append(self._text_node("disk", str(self.disk)))
if self.swap is not None:
meta.append(self._text_node("swap", str(self.swap)))
if self.ephemeral is not None:
meta.append(self._text_node("ephemeral", str(self.ephemeral)))
if self.vcpus is not None:
meta.append(self._text_node("vcpus", str(self.vcpus)))
return meta
class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaOwner,
self).__init__(root_name="owner",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.userid = None
self.username = None
self.projectid = None
self.projectname = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
if self.userid is not None and self.username is not None:
user = self._text_node("user", self.username)
user.set("uuid", self.userid)
meta.append(user)
if self.projectid is not None and self.projectname is not None:
project = self._text_node("project", self.projectname)
project.set("uuid", self.projectid)
meta.append(project)
return meta
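# Illustrative sketch (not part of the original module): composing the
# nova:instance guest metadata from the three classes above.  All values
# below are placeholders, not real identifiers.
def _example_instance_metadata_xml():
    meta = LibvirtConfigGuestMetaNovaInstance()
    meta.package = '2014.2'
    meta.name = 'demo-instance'
    meta.creationTime = 0  # epoch seconds, rendered as "1970-01-01 00:00:00"
    flavor = LibvirtConfigGuestMetaNovaFlavor()
    flavor.name = 'm1.small'
    flavor.memory = 2048
    flavor.vcpus = 1
    meta.flavor = flavor
    owner = LibvirtConfigGuestMetaNovaOwner()
    owner.userid = 'not-a-real-user-uuid'
    owner.username = 'demo'
    meta.owner = owner
    return etree.tostring(meta.format_dom())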
class LibvirtConfigSecret(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigSecret,
self).__init__(root_name="secret")
self.ephemeral = False
self.private = False
self.description = None
self.uuid = None
self.usage_type = None
self.usage_id = None
def get_yes_no_str(self, value):
if value:
return 'yes'
return 'no'
def format_dom(self):
root = super(LibvirtConfigSecret, self).format_dom()
root.set("ephemeral", self.get_yes_no_str(self.ephemeral))
root.set("private", self.get_yes_no_str(self.private))
if self.description is not None:
root.append(self._text_node("description", str(self.description)))
if self.uuid is not None:
root.append(self._text_node("uuid", str(self.uuid)))
usage = self._new_node("usage")
usage.set("type", self.usage_type)
if self.usage_type == 'ceph':
usage.append(self._text_node('name', str(self.usage_id)))
elif self.usage_type == 'iscsi':
usage.append(self._text_node('target', str(self.usage_id)))
elif self.usage_type == 'volume':
usage.append(self._text_node('volume', str(self.usage_id)))
root.append(usage)
return root
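# Illustrative sketch (not part of the original module): a ceph secret
# definition, as used for rbd volume auth, would serialize roughly as
#   <secret ephemeral="no" private="no">
#     <uuid>not-a-real-uuid</uuid>
#     <usage type="ceph"><name>client.cinder secret</name></usage>
#   </secret>
# The uuid and usage values below are placeholders.
def _example_ceph_secret_xml():
    secret = LibvirtConfigSecret()
    secret.uuid = 'not-a-real-uuid'
    secret.usage_type = 'ceph'
    secret.usage_id = 'client.cinder secret'
    return etree.tostring(secret.format_dom())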
|
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring
"""Quick program to test the qi tools modules."""
import unittest
from unittest.mock import Mock, call, patch
import math
from io import StringIO
import numpy as np
from qiskit.tools.qi.qi import partial_trace, vectorize, devectorize, outer
from qiskit.tools.qi.qi import concurrence, qft, chop
from qiskit.tools.qi.qi import shannon_entropy, entropy, mutual_information
from qiskit.tools.qi.qi import choi_to_rauli
from qiskit.tools.qi.qi import entanglement_of_formation, is_pos_def
from qiskit.tools.qi.qi import __eof_qubit as eof_qubit
from qiskit.quantum_info import purity
from qiskit.quantum_info.random import random_density_matrix
from qiskit.exceptions import QiskitError
from qiskit.test import QiskitTestCase
class TestQI(QiskitTestCase):
"""Tests for qi.py"""
def test_partial_trace(self):
# reference
rho0 = [[0.5, 0.5], [0.5, 0.5]]
rho1 = [[1, 0], [0, 0]]
rho2 = [[0, 0], [0, 1]]
rho10 = np.kron(rho1, rho0)
rho20 = np.kron(rho2, rho0)
rho21 = np.kron(rho2, rho1)
rho210 = np.kron(rho21, rho0)
rhos = [rho0, rho1, rho2, rho10, rho20, rho21]
# test partial trace
tau0 = partial_trace(rho210, [1, 2])
tau1 = partial_trace(rho210, [0, 2])
tau2 = partial_trace(rho210, [0, 1])
# test different dimensions
tau10 = partial_trace(rho210, [1], dimensions=[4, 2])
tau20 = partial_trace(rho210, [1], dimensions=[2, 2, 2])
tau21 = partial_trace(rho210, [0], dimensions=[2, 4])
taus = [tau0, tau1, tau2, tau10, tau20, tau21]
all_pass = True
for i, j in zip(rhos, taus):
all_pass &= (np.linalg.norm(i - j) == 0)
self.assertTrue(all_pass)
def test_vectorize(self):
mat = [[1, 2], [3, 4]]
col = [1, 3, 2, 4]
row = [1, 2, 3, 4]
paul = [5, 5, -1j, -3]
test_pass = (np.linalg.norm(vectorize(mat) - col) == 0 and
np.linalg.norm(vectorize(mat, method='col') - col) == 0 and
np.linalg.norm(vectorize(mat, method='row') - row) == 0 and
np.linalg.norm(vectorize(mat, method='pauli') - paul) == 0)
self.assertTrue(test_pass)
def test_devectorize(self):
mat = [[1, 2], [3, 4]]
col = [1, 3, 2, 4]
row = [1, 2, 3, 4]
paul = [5, 5, -1j, -3]
test_pass = (np.linalg.norm(devectorize(col) - mat) == 0 and
np.linalg.norm(devectorize(col, method='col') - mat) == 0 and
np.linalg.norm(devectorize(row, method='row') - mat) == 0 and
np.linalg.norm(devectorize(paul, method='pauli') - mat) == 0)
self.assertTrue(test_pass)
def test_outer(self):
v_z = [1, 0]
v_y = [1, 1j]
rho_z = [[1, 0], [0, 0]]
rho_y = [[1, -1j], [1j, 1]]
op_zy = [[1, -1j], [0, 0]]
op_yz = [[1, 0], [1j, 0]]
test_pass = (np.linalg.norm(outer(v_z) - rho_z) == 0 and
np.linalg.norm(outer(v_y) - rho_y) == 0 and
np.linalg.norm(outer(v_y, v_z) - op_yz) == 0 and
np.linalg.norm(outer(v_z, v_y) - op_zy) == 0)
self.assertTrue(test_pass)
def test_purity(self):
rho1 = [[1, 0], [0, 0]]
rho2 = [[0.5, 0], [0, 0.5]]
rho3 = 0.7 * np.array(rho1) + 0.3 * np.array(rho2)
test_pass = (purity(rho1) == 1.0 and
purity(rho2) == 0.5 and
round(purity(rho3), 10) == 0.745)
self.assertTrue(test_pass)
def test_purity_1d_input(self):
input_state = [1, 0]
res = purity(input_state)
self.assertEqual(1, res)
def test_concurrence(self):
psi1 = [1, 0, 0, 0]
rho1 = [[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]]
rho2 = [[0, 0, 0, 0], [0, 0.5, -0.5j, 0],
[0, 0.5j, 0.5, 0], [0, 0, 0, 0]]
rho3 = 0.5 * np.array(rho1) + 0.5 * np.array(rho2)
rho4 = 0.75 * np.array(rho1) + 0.25 * np.array(rho2)
test_pass = (concurrence(psi1) == 0.0 and
concurrence(rho1) == 1.0 and
concurrence(rho2) == 1.0 and
concurrence(rho3) == 0.0 and
concurrence(rho4) == 0.5)
self.assertTrue(test_pass)
def test_concurrence_not_two_qubits(self):
input_state = np.array([[0, 1], [1, 0]])
self.assertRaises(Exception, concurrence, input_state)
def test_qft(self):
num_qbits = 3
circuit = Mock()
q = list(range(num_qbits))
qft(circuit, q, num_qbits)
self.assertEqual([call(0), call(1), call(2)], circuit.h.mock_calls)
expected_calls = [call(math.pi / 2.0, 1, 0),
call(math.pi / 4.0, 2, 0),
call(math.pi / 2.0, 2, 1)]
self.assertEqual(expected_calls, circuit.cu1.mock_calls)
def test_chop(self):
array_in = [1.023, 1.0456789, 0.0000001, 0.1]
res = chop(array_in, epsilon=1e-3)
for i, expected in enumerate([1.023, 1.0456789, 0.0, 0.1]):
self.assertEqual(expected, res[i])
def test_chop_imaginary(self):
array_in = np.array([0.000456789+0.0004j, 1.0456789, 4+0.00004j,
0.0000742+3j, 0.000002, 2+6j])
res = chop(array_in, epsilon=1e-3)
for i, expected in enumerate([0.0+0.0j, 1.0456789, 4+0.0j, 0+3j,
0.0, 2+6j]):
self.assertEqual(expected, res[i])
def test_shannon_entropy(self):
input_pvec = np.array([0.5, 0.3, 0.07, 0.1, 0.03])
# Base 2
self.assertAlmostEqual(1.7736043871504037,
shannon_entropy(input_pvec))
# Base e
self.assertAlmostEqual(1.229368880382052,
shannon_entropy(input_pvec, np.e))
# Base 10
self.assertAlmostEqual(0.533908120973504,
shannon_entropy(input_pvec, 10))
def test_entropy(self):
input_density_matrix = np.array([[0.5, 0.0], [0.0, 0.5]])
res = entropy(input_density_matrix)
self.assertAlmostEqual(0.6931471805599453, res)
def test_entropy_1d(self):
input_vector = np.array([0.5, 1, 0])
res = entropy(input_vector)
self.assertEqual(0, res)
def test_mutual_information(self):
input_state = np.array([[0.5, 0.25, 0.75, 1],
[1, 0, 1, 0],
[0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]])
res = mutual_information(input_state, 2)
self.assertAlmostEqual(-0.15821825498448047, res)
def test_entanglement_of_formation(self):
input_state = np.array([[0.5, 0.25, 0.75, 1],
[1, 0, 1, 0],
[0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]])
res = entanglement_of_formation(input_state, 2)
self.assertAlmostEqual(0.6985340217364572, res)
def test_entanglement_of_formation_1d_input(self):
input_state = np.array([0.5, 0.25, 0.75, 1])
res = entanglement_of_formation(input_state, 2)
self.assertAlmostEqual(0.15687647805861626, res)
def test_entanglement_of_formation_invalid_input(self):
input_state = np.array([[0, 1], [1, 0]])
expected = "Input must be a state-vector or 2-qubit density matrix."
with patch('sys.stdout', new=StringIO()) as fake_stout:
res = entanglement_of_formation(input_state, 1)
self.assertEqual(fake_stout.getvalue().strip(), expected)
self.assertIsNone(res)
def test__eof_qubit(self):
input_rho = np.array([[0.5, 0.25, 0.75, 1],
[1, 0, 1, 0],
[0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]])
res = eof_qubit(input_rho)
self.assertAlmostEqual(0.6985340217364572, res)
def test_is_pos_def(self):
input_x = np.array([[1, 0],
[0, 1]])
res = is_pos_def(input_x)
self.assertTrue(res)
def test_choi_to_rauli(self):
input_matrix = np.array([[0.5, 0.25, 0.75, 1],
[1, 0, 1, 0],
[0.5, 0.5, 0.5, 0.5],
[0, 1, 0, 1]])
res = choi_to_rauli(input_matrix)
expected = np.array([[2.0+0.j, 2.25+0.0j, 0.0+0.75j, -1.0+0.0j],
[1.75+0.j, 2.5+0.j, 0.-1.5j, 0.75+0.0j],
[0.0-0.25j, 0.0+0.5j, -0.5+0.0j, 0.0-1.25j],
[0.0+0.j, 0.25+0.0j, 0.0-1.25j, 1.0+0.0j]])
self.assertTrue(np.array_equal(expected, res))
def test_random_density_matrix(self):
random_hs_matrix = random_density_matrix(2, seed=42)
self.assertEqual((2, 2), random_hs_matrix.shape)
random_bures_matrix = random_density_matrix(2, method='Bures', seed=40)
self.assertEqual((2, 2), random_bures_matrix.shape)
def test_random_density_matrix_invalid_method(self):
self.assertRaises(QiskitError, random_density_matrix, 2,
method='Special')
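def _manual_shannon_entropy(pvec, base=2):
    """Illustrative cross-check (not part of the original tests): Shannon
    entropy computed directly as -sum(p * log_base(p)), from which the
    expected values in test_shannon_entropy above can be reproduced."""
    return -sum(p * math.log(p, base) for p in pvec if p > 0)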
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import abc
from collections import OrderedDict
import os
from os.path import join as jp
from jenkinsflow.api_base import UnknownJobException, BuildResult, Progress
from .abstract_api import AbstractApiJob, AbstractApiJenkins
from .config import test_tmp_dir
def _mkdir(path):
try:
os.mkdir(path)
except OSError:
if not os.path.exists(path):
raise
class TestJob(AbstractApiJob, metaclass=abc.ABCMeta):
_current_order = 1
def __init__(self, exec_time, max_fails, expect_invocations, expect_order, initial_buildno, invocation_delay,
unknown_result, final_result, serial, print_env, flow_created, create_job, disappearing, non_existing, kill):
"""
Set unknown_result to True if the result is indeterminate (timeout or invoke_unchecked)
"""
assert exec_time > 0
assert max_fails >= 0
assert expect_invocations >= 0
assert expect_order is None or expect_order >= 1
assert initial_buildno is None or initial_buildno >= 1
assert invocation_delay >= 0
assert unknown_result in (False, True)
assert serial in (False, True)
assert flow_created in (False, True)
assert disappearing in (False, True)
assert non_existing in (False, True)
self.exec_time = exec_time
self.max_fails = max_fails
self.expect_invocations = expect_invocations
self.expect_order = expect_order
self.initial_buildno = initial_buildno
self.invocation_delay = invocation_delay
self.unknown_result = unknown_result
self.serial = serial
self.print_env = print_env
self.final_result = final_result if isinstance(final_result, (BuildResult, type(None))) else BuildResult[final_result.upper()]
self.flow_created = flow_created
self.create_job = create_job
self.disappearing = disappearing
self.non_existing = non_existing
self.kill = kill
self.invocation_number = 0
self.invocation_time = self.start_time = self.end_time = 0
self.actual_order = -1
self.build_params = None
@property
def has_force_result_param(self):
return self.max_fails > 0 or self.final_result
def invoke(self, securitytoken, build_params, cause, description):
self.build_params = build_params
self.invocation_number += 1
self.actual_order = TestJob._current_order
TestJob._current_order += 1
def __repr__(self):
return ", expect_invocations: " + repr(self.expect_invocations) + \
", invocation_number: " + repr(self.invocation_number) + \
", expect_order: " + repr(self.expect_order) + \
", start_time: " + repr(self.start_time) + \
", exec_time: " + repr(self.exec_time) + \
", end_time: " + repr(self.end_time)
class Jobs():
def __init__(self, api):
self.api = api
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
return None
test_jobs = self.api.test_jobs
for job_name, job in test_jobs.items():
if job.flow_created:
# Patch up another job that is supposed to be created by this job - replace job_name with job reference
for other_job in test_jobs.values():
if isinstance(other_job.create_job, str):
other_job_name = self.api.job_name_prefix + other_job.create_job
if other_job_name == job_name:
other_job.create_job = job
break
else:
raise Exception("Job: " + repr(job_name) + " is supposed to be created by another job, but that job was not found")
for job_name, job in test_jobs.items():
if job.create_job and isinstance(job.create_job, str):
raise Exception("Job: " + repr(job_name) + " is supposed to create job: " + repr(job.create_job) + " but definition for that job was not found")
class TestJenkins(AbstractApiJenkins, metaclass=abc.ABCMeta):
def __init__(self, job_name_prefix, **kwargs):
super().__init__(**kwargs)
self.job_name_prefix = job_name_prefix
TestJob._current_order = 1
self.test_jobs = OrderedDict()
@abc.abstractmethod
def job(self, name, max_fails, expect_invocations, expect_order, exec_time=None, initial_buildno=None, invocation_delay=0.1, params=None,
script=None, unknown_result=False, final_result=None, serial=False, print_env=False, flow_created=False, create_job=None, disappearing=False,
non_existing=False, kill=False, num_builds_to_keep=4, allow_running=False):
"""Create a job with corresponding test metadata.
Args:
           name (str): Base name of the job; the test framework will add a prefix based on the test module
exec_time (int): Number of seconds that the job will run (sleep), actual run may/will be longer
max_fails (int): Number of times the job will fail during this flow (when using retry)
           expect_invocations (int): Number of expected invocations during this flow. Will be greater than or equal to exec_time.
...
flow_created (boolean): This job is expected to be non-existing at start of flow and be created during the flow
create_job (str): Name of another job that will be created by this job, when this job is running
"""
pass
    # Delete/Create hack sufficient to get reasonable coverage on the job_load test
def delete_job(self, job_name):
try:
job = self.test_jobs[job_name]
if job.non_existing:
raise UnknownJobException(job_name)
job.non_existing = True
except KeyError as ex:
raise Exception("Test job setup error, missing test job definition:", job_name) from ex
def create_job(self, job_name, config_xml):
try:
job = self.test_jobs[job_name]
job.non_existing = False
except KeyError as ex:
raise Exception("Test job setup error, missing test job definition:", job_name) from ex
def job_creator(self):
return Jobs(self)
@abc.abstractmethod
def flow_job(self, name=None, params=None):
pass
def flow_job_name(self, name):
# Don't create flow jobs when mocked
name = '0flow_' + name if name else '0flow'
return (self.job_name_prefix or '') + name
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self._pre_work_dir = os.getcwd()
        self._work_dir = jp(test_tmp_dir, self.job_name_prefix)
_mkdir(self._work_dir)
os.chdir(self._work_dir)
return self
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self._pre_work_dir)
if not exc_type:
self.test_results()
def test_results(self):
print("Jenkinsflow Test Framework -- Checking results")
max_actual_order = 0
last_expected_order = 0
last_end_time = 0
for job in self.test_jobs.values():
if job.expect_order is not None:
# Check job invocation order
assert last_expected_order <= job.expect_order, "Mock job list must be sorted by expected order, error in test setup."
assert job.actual_order >= job.expect_order, "Job: " + job.name + " was started out of order, " + \
"job.actual_order: " + repr(job.actual_order) + ", job.expect_order: " + repr(job.expect_order)
if job.expect_order > last_expected_order:
assert job.actual_order > max_actual_order
if job.serial:
assert job.invocation_time >= last_end_time, "Serial job " + job.name + " started before previous job finished"
last_end_time = job.end_time
last_expected_order = job.expect_order
max_actual_order = max(job.actual_order, max_actual_order)
if job.expect_invocations is not None:
# Check expected number of job invocations
assert job.expect_invocations == job.invocation_number, "Job: " + job.name + " invoked " + str(job.invocation_number) + " times, expected " + str(job.expect_invocations) + " invocations"
if job.unknown_result:
# The job must still be running, but maybe it has not been started yet, so wait up to 3 seconds for it to start
max_wait_to_start = 3
start_check_interval = 0.01
max_wait_index = int(max_wait_to_start / start_check_interval)
prev_progress = None
for _ in range(1, max_wait_index):
# TODO: job obj should be invocation obj!
result, progress, last_build_number = job.job_status()
if progress == Progress.RUNNING:
break
if progress != prev_progress:
print("FW: last build status:", job.name, result, progress, last_build_number)
prev_progress = progress
self.sleep(start_check_interval)
if hasattr(job, 'jenkins'):
job.jenkins.quick_poll()
job.poll()
# pylint: disable=maybe-no-member
assert progress == Progress.RUNNING, "Job: " + job.name + " is expected to be running, but state is " + progress.name
# Now stop the job, so that it won't be running after the testsuite is finished
job.stop_all()
elif job.expect_invocations != 0 and not job.kill:
if job.invocation_number > job.max_fails:
expect_status = BuildResult.SUCCESS if job.final_result is None else job.final_result
else:
expect_status = BuildResult.FAILURE
# TODO job obj should be called invocation!
# Get last obj
try:
key = next(reversed(job._invocations))
invocation = job._invocations[key]
except TypeError:
# script_api ?
invocation = job._invocations[-1]
assert invocation.build_number is not None, "Job: " + repr(job) + " should have had build_number, but it has None"
result, progress = invocation.status()
assert result == expect_status, "Job: " + job.name + " expected result " + repr(expect_status) + " but got " + repr(result)
elif job.kill:
result, progress, _ = job.job_status()
assert progress == Progress.IDLE
# TODO
# expect_status = BuildResult.ABORTED
# assert result == expect_status, "Job: " + job.name + " expected result " + repr(expect_status) + " but got " + repr(result)
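# Illustrative usage sketch (assumed shape, not part of the original module):
# a concrete TestJenkins subclass (hypothetical name below) is normally driven
# through the two context managers defined above, roughly:
#
#     with SomeTestJenkins(job_name_prefix='demo_0_') as api:
#         with api.job_creator():
#             api.job('j1', max_fails=0, expect_invocations=1, expect_order=1)
#             api.job('j2', max_fails=0, expect_invocations=1, expect_order=2,
#                     serial=True)
#         ...  # run the flow under test against 'api'
#     # leaving the outer 'with' block without an exception runs test_results()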
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitAuthorizationsOperations:
"""ExpressRouteCircuitAuthorizationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitAuthorization":
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitAuthorization, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitAuthorization
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "_models.ExpressRouteCircuitAuthorization",
**kwargs: Any
) -> "_models.ExpressRouteCircuitAuthorization":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "_models.ExpressRouteCircuitAuthorization",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitAuthorization"]:
"""Creates or updates an authorization in the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or update express route
circuit authorization operation.
:type authorization_parameters: ~azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitAuthorization
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitAuthorization or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitAuthorization]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
authorization_parameters=authorization_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AuthorizationListResult"]:
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AuthorizationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.AuthorizationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AuthorizationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AuthorizationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'} # type: ignore
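# Illustrative usage sketch (assumptions: the async NetworkManagementClient for
# api-version 2017-03-01 exposes this operation group as
# `express_route_circuit_authorizations`, and `credential`/`subscription_id`
# are supplied by the caller):
#
#     async with NetworkManagementClient(credential, subscription_id) as client:
#         ops = client.express_route_circuit_authorizations
#         poller = await ops.begin_create_or_update(
#             'my-rg', 'my-circuit', 'my-auth',
#             _models.ExpressRouteCircuitAuthorization())
#         authorization = await poller.result()
#         async for auth in ops.list('my-rg', 'my-circuit'):
#             print(auth.name)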
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import atexit
import errno
import os
import shutil
import stat
import tempfile
import threading
import uuid
from collections import defaultdict
from pants.util.strutil import ensure_text
def safe_mkdir(directory, clean=False):
"""Ensure a directory is present.
If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty."""
if clean:
safe_rmtree(directory)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def safe_mkdir_for(path):
"""Ensure that the parent directory for a file is present.
If it's not there, create it. If it is, no-op.
"""
safe_mkdir(os.path.dirname(path), clean=False)
def safe_file_dump(path, content):
safe_mkdir_for(path)
with open(path, 'w') as outfile:
outfile.write(content)
def safe_walk(path, **kwargs):
"""Just like os.walk, but ensures that the returned values are unicode objects.
This isn't strictly safe, in that it is possible that some paths
will not be decodeable, but that case is rare, and the only
alternative is to somehow avoid all interaction between paths and
unicode objects, which seems especially tough in the presence of
unicode_literals. See e.g.
https://mail.python.org/pipermail/python-dev/2008-December/083856.html
"""
# If os.walk is given a text argument, it yields text values; if it
# is given a binary argument, it yields binary values.
return os.walk(ensure_text(path), **kwargs)
_MKDTEMP_CLEANER = None
_MKDTEMP_DIRS = defaultdict(set)
_MKDTEMP_LOCK = threading.RLock()
def _mkdtemp_atexit_cleaner():
for td in _MKDTEMP_DIRS.pop(os.getpid(), []):
safe_rmtree(td)
def _mkdtemp_unregister_cleaner():
global _MKDTEMP_CLEANER
_MKDTEMP_CLEANER = None
def _mkdtemp_register_cleaner(cleaner):
global _MKDTEMP_CLEANER
if not cleaner:
return
assert callable(cleaner)
if _MKDTEMP_CLEANER is None:
atexit.register(cleaner)
_MKDTEMP_CLEANER = cleaner
def safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw):
"""Create a temporary directory that is cleaned up on process exit.
Arguments are as to tempfile.mkdtemp.
"""
# Proper lock sanitation on fork [issue 6721] would be desirable here.
with _MKDTEMP_LOCK:
return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner)
def register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner):
"""Register an existing directory to be cleaned up at process exit."""
with _MKDTEMP_LOCK:
_mkdtemp_register_cleaner(cleaner)
_MKDTEMP_DIRS[os.getpid()].add(directory)
return directory
def safe_rmtree(directory):
"""Delete a directory if it's present. If it's not present, no-op."""
shutil.rmtree(directory, ignore_errors=True)
def safe_open(filename, *args, **kwargs):
"""Open a file safely, ensuring that its directory exists."""
safe_mkdir(os.path.dirname(filename))
return open(filename, *args, **kwargs)
def safe_delete(filename):
"""Delete a file safely. If it's not present, no-op."""
try:
os.unlink(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_concurrent_rename(src, dst):
"""Rename src to dst, ignoring errors due to dst already existing.
Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins.
"""
# Delete dst, in case it existed (with old content) even before any concurrent processes
# attempted this write. This ensures that at least one process writes the new content.
if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src.
safe_rmtree(dst)
else:
safe_delete(dst)
try:
shutil.move(src, dst)
except IOError as e:
if e.errno != errno.EEXIST:
raise
def safe_concurrent_create(func, path):
"""Safely execute code that creates a file at a well-known path.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param func: A callable that takes a single path argument and creates a file at that path.
:param path: The path to execute the callable on.
:return: func(path)'s return value.
"""
safe_mkdir_for(path)
tmp_path = '{0}.tmp.{1}'.format(path, uuid.uuid4().hex)
ret = func(tmp_path)
safe_concurrent_rename(tmp_path, path)
return ret
def chmod_plus_x(path):
"""Equivalent of unix `chmod a+x path`"""
path_mode = os.stat(path).st_mode
path_mode &= int('777', 8)
if path_mode & stat.S_IRUSR:
path_mode |= stat.S_IXUSR
if path_mode & stat.S_IRGRP:
path_mode |= stat.S_IXGRP
if path_mode & stat.S_IROTH:
path_mode |= stat.S_IXOTH
os.chmod(path, path_mode)
def relative_symlink(source_path, link_path):
"""Create a symlink at link_path pointing to relative source
:param source_path: Absolute path to source file
:param link_path: Absolute path to intended symlink
:raises ValueError if source_path or link_path are not unique, absolute paths
:raises OSError on failure UNLESS file already exists or no such file/directory
"""
if not os.path.isabs(source_path):
raise ValueError("Path for source:{} must be absolute".format(source_path))
if not os.path.isabs(link_path):
raise ValueError("Path for link:{} must be absolute".format(link_path))
if source_path == link_path:
raise ValueError("Path for link is identical to source:{}".format(source_path))
try:
if os.path.lexists(link_path):
os.unlink(link_path)
rel_path = os.path.relpath(source_path, os.path.dirname(link_path))
os.symlink(rel_path, link_path)
except OSError as e:
# Another run may beat us to deletion or creation.
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
raise
def relativize_path(path, rootdir):
# Note that we can't test for length and return the shorter of the two, because we need these
# paths to be stable across systems (e.g., because they get embedded in analysis files),
# and this choice might be inconsistent across systems. So we assume the relpath is always
# shorter. We relativize because of a known case of very long full path prefixes on Mesos,
# so this seems like the right heuristic.
# Note also that we mustn't call realpath on the path - we need to preserve the symlink structure.
return os.path.relpath(path, rootdir)
# When running pants under mesos/aurora, the sandbox pathname can be very long. Since it gets
# prepended to most components in the classpath (some from ivy, the rest from the build),
# in some runs the classpath gets too big and exceeds ARG_MAX.
# We prevent this by using paths relative to the current working directory.
def relativize_paths(paths, rootdir):
return [relativize_path(path, rootdir) for path in paths]
def touch(path, times=None):
"""Equivalent of unix `touch path`.
  :param path: The file to touch.
  :param times: Either a tuple of (atime, mtime) or else a single time to use for both. If not
    specified, both atime and mtime are updated to the current time.
"""
if times:
if len(times) > 2:
raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '
'to use for both.')
    if len(times) == 1:
      # A 1-tuple was passed; use its single value for both atime and mtime.
      times = (times[0], times[0])
with safe_open(path, 'a'):
os.utime(path, times)
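# Illustrative sketch (not part of the original module): creating a marker
# file at a well-known path in a concurrency-safe way with the helpers above.
def _example_safe_concurrent_create(path):
  def write_marker(tmp_path):
    with safe_open(tmp_path, 'w') as out:
      out.write('done\n')
  safe_concurrent_create(write_marker, path)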
|
|
from pamqp import specification
from pamqp.body import ContentBody
from amqpstorm import Channel
from amqpstorm import exception
from amqpstorm.basic import Basic
from amqpstorm.tests.utility import FakeConnection
from amqpstorm.tests.utility import TestFramework
class BasicExceptionTests(TestFramework):
def test_basic_qos_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'prefetch_count should be an integer',
basic.qos, 'travis-ci'
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'prefetch_size should be an integer',
basic.qos, 1, 'travis-ci'
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'global_ should be a boolean',
basic.qos, 1, 1, 'travis-ci'
)
def test_basic_get_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'queue should be a string',
basic.get, None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'no_ack should be a boolean',
basic.get, '', 'travis-ci'
)
channel.consumer_tags.append('travis-ci')
self.assertRaisesRegexp(
exception.AMQPChannelError,
"Cannot call 'get' when channel "
"is set to consume",
basic.get, '', True, 'travis-ci'
)
def test_basic_recover_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'requeue should be a boolean',
basic.recover, None
)
def test_basic_consume_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'queue should be a string',
basic.consume, None, 1
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'consumer_tag should be a string',
basic.consume, None, '', 1
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'exclusive should be a boolean',
basic.consume, None, '', '', None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'no_ack should be a boolean',
basic.consume, None, '', '', True, None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'no_local should be a boolean',
basic.consume, None, '', '', True, True, None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'arguments should be a dict or None',
basic.consume, None, '', '', True, True, True, []
)
def test_basic_cancel_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'consumer_tag should be a string',
basic.cancel, None
)
def test_basic_publish_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'body should be a string',
basic.publish, None, ''
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'routing_key should be a string',
basic.publish, '', None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'exchange should be a string',
basic.publish, '', '', None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'properties should be a dict or None',
basic.publish, '', '', '', []
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'properties should be a dict or None',
basic.publish, '', '', '', 1
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'mandatory should be a boolean',
basic.publish, '', '', '', {}, None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'immediate should be a boolean',
basic.publish, '', '', '', {}, True, None
)
def test_basic_ack_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'delivery_tag should be an integer',
basic.ack, 'travis-ci'
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'delivery_tag should be an integer',
basic.ack, None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'multiple should be a boolean',
basic.ack, 1, None
)
def test_basic_nack_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'delivery_tag should be an integer',
basic.nack, 'travis-ci'
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'multiple should be a boolean',
basic.nack, 1, None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'requeue should be a boolean',
basic.nack, 1, True, None
)
def test_basic_reject_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'delivery_tag should be an integer',
basic.reject, 'travis-ci'
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'delivery_tag should be an integer',
basic.reject, None
)
self.assertRaisesRegexp(
exception.AMQPInvalidArgument,
'requeue should be a boolean',
basic.reject, 1, None
)
def test_basic_get_content_body_timeout_error(self):
body = ContentBody(value=self.message)
channel = Channel(0, FakeConnection(), 0.01)
channel.set_state(Channel.OPEN)
basic = Basic(channel)
uuid = channel.rpc.register_request([body.name])
self.assertRaisesRegexp(
exception.AMQPChannelError,
            r'rpc requests .* \(.*\) took too long',
basic._get_content_body, uuid, len(self.message)
)
def test_basic_publish_confirms_raises_on_timeout(self):
connection = FakeConnection()
channel = Channel(9, connection, 0.01)
channel._confirming_deliveries = True
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPChannelError,
            r'rpc requests .* \(.*\) took too long',
basic.publish, body=self.message,
routing_key='travis-ci'
)
def test_basic_publish_confirms_raises_on_invalid_frame(self):
def on_publish_return_invalid_frame(*_):
channel.rpc.on_frame(specification.Basic.Cancel())
connection = FakeConnection(on_write=on_publish_return_invalid_frame)
channel = Channel(9, connection, 0.01)
channel._confirming_deliveries = True
channel.set_state(Channel.OPEN)
basic = Basic(channel)
self.assertRaisesRegexp(
exception.AMQPChannelError,
            r'rpc requests .* \(.*\) took too long',
basic.publish, body=self.message,
routing_key='travis-ci'
)
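# Illustrative sketch (assumed call shapes, not part of the original tests):
# the valid counterparts of the arguments rejected above look roughly like
#
#     basic.qos(prefetch_count=1, prefetch_size=0, global_=False)
#     basic.publish(body='payload', routing_key='travis-ci', exchange='',
#                   properties={}, mandatory=False, immediate=False)
#     basic.ack(delivery_tag=1, multiple=False)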
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# parse EIT now & next information from DVB-T streams
from Kamaelia.Device.DVB.Core import DVB_Multiplex, DVB_Demuxer
from Axon.Component import component
import struct
from Axon.Ipc import shutdownMicroprocess,producerFinished
class PSIPacketReconstructor(component):
"""\
Takes DVB Transport stream packets for a given PID and reconstructs the
PSI packets from within the stream.
Will only handle stream from a single PID.
"""
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
buffer = ""
nextCont = None
# XXX assuming for the moment that this can only handle one PID at a time
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
byte = ord(data[1])
start_indicator = (byte & 0x40) != 0
byte = ord(data[3])
adaption = (byte & 0x30) >> 4
contcount = byte & 0x0f
# check continuity counter is okay (otherwise ignore packet)
                # or that it's the start of a new packet and we've not started receiving yet
if (nextCont == None and start_indicator) or nextCont == contcount:
# determine start of payload offset
if adaption == 1:
payload_start = 4
elif adaption == 3:
payload_start = 4+1+ord(data[4]) # skip past adaption data
else: # adaption == 0 or adaption == 2
# ignore if adaption field==0 or no payload
continue
# if start of new payload present, flush previous, now complete, packet
if start_indicator:
prevstart = payload_start
payload_start = prevstart + ord(data[prevstart]) + 1
buffer = buffer + data[prevstart+1:payload_start]
if len(buffer) and nextCont != None: # don't flush through dregs if this is the first time
self.send( buffer, "outbox" )
buffer = ""
buffer = buffer + data[payload_start:]
nextCont = (contcount + 1) & 0xf
else:
# reset for crash relock
nextCont = None
buffer= ""
self.pause()
yield 1
class EITPacketParser(component):
"""\
Parses EIT packets and extracts NOW & NEXT short event descriptions for
channels within this transport stream.
(Ignores events belonging to other multiplexes)
"""
Inboxes = { "inbox" : "PES packets",
"control" : "NOT USED",
}
Outboxes = { "outbox" : "Parsed NOW and NEXT EIT events",
"signal" : "NOT USED",
}
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
msg = {}
                # parse the EIT table header (the CRC itself is checked further below)
s = struct.unpack(">BHHBBBHHBB", data[:14])
table_id = s[0]
                syntax = s[1] & 0x8000
section_length = s[1] & 0x0fff
service_id = s[2]
version = (s[3] >>1) & 0x1f
current_next = s[3] & 0x01
section_num = s[4]
last_section = s[5]
ts_id = s[6]
net_id = s[7]
seg_last_sect = s[8]
last_table_id = s[9]
data=data[:3+section_length] # remove any padding at end of table
if table_id != 0x4e: # only interested in present/following data for this TS
continue
if not syntax:
print "wrong syntax"
continue
if not current_next: # subtable not yet applicable
continue
# which subtable (uniquely identified by table_id, service(channel), TS and network)
subtable_id = (table_id, service_id, ts_id, net_id)
# print "EIT table_id=",hex(table_id)
# print subtable_id
# print section_num,last_section,seg_last_sect
if crc32(data): # fail on non-zero result
print "EIT packet CRC error"
continue
msg['service'] = service_id
msg['transportstream'] = ts_id
# go through events
pos = 14
while pos < len(data) - 4: # 4 bytes for final checksum
e = struct.unpack(">HHBBBBBBH",data[pos:pos+12])
event_id = e[0]
date = parseMJD(e[1]) # Y, M, D
time = unBCD(e[2]), unBCD(e[3]), unBCD(e[4]) # HH, MM, SS
duration = unBCD(e[5]), unBCD(e[6]), unBCD(e[7]) # HH, MM, SS
running_status = (e[8] & 0xe000) >> 13
free_CA_mode = e[8] & 0x1000
descriptors_len = e[8] & 0x0fff
if running_status in [1,2]:
msg['when'] = "NEXT"
elif running_status in [3,4]:
msg['when'] = "NOW"
msg['startdate'] = date
msg['starttime'] = time
msg['duration'] = duration
pos = pos + 12
descriptors_end = pos + descriptors_len
# go through descriptors
while pos < descriptors_end:
desc_tag = ord(data[pos])
desc_len = ord(data[pos+1])
if desc_tag == 0x4d: # only interested in Short Event Descriptor
lang = data[pos+2:pos+5]
namelen = ord(data[pos+5])
name = data[pos+6:pos+6+namelen]
textlen = ord(data[pos+6+namelen])
text = data[pos+7+namelen:pos+7+namelen+textlen]
msg['name'] = name
msg['description'] = text
pos = pos + 2 + desc_len
self.send(msg, "outbox")
self.pause()
yield 1
def crc32(data):
poly = 0x4c11db7
crc = 0xffffffffL
for byte in data:
byte = ord(byte)
for bit in range(7,-1,-1): # MSB to LSB
z32 = crc>>31 # top bit
crc = crc << 1
if ((byte>>bit)&1) ^ z32:
crc = crc ^ poly
crc = crc & 0xffffffffL
return crc
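# Note (not in the original file): DVB-SI sections carry a CRC32 computed so
# that running crc32() over an intact section, including its trailing CRC
# field, yields 0 -- which is why EITPacketParser treats any non-zero result
# as a corrupt packet.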
def parseMJD(MJD):
"""Parse 16 bit unsigned int containing Modified Julian Date, as per DVB-SI spec
returning year,month,day"""
YY = int( (MJD - 15078.2) / 365.25 )
MM = int( (MJD - 14956.1 - int(YY*365.25) ) / 30.6001 )
D = MJD - 14956 - int(YY*365.25) - int(MM * 30.6001)
K=0
if MM == 14 or MM == 15:
K=1
return (1900 + YY+K), (MM-1-K*12), D
def unBCD(byte):
return (byte>>4)*10 + (byte & 0xf)
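# Rough worked example for the two helpers above (illustrative values): a DVB
# EIT start_time field packs a 16-bit MJD date followed by three BCD-coded
# time bytes, so MJD 53084 with BCD bytes 0x21, 0x30, 0x00 decodes as:
#
#   parseMJD(53084)                         # -> (2004, 3, 20)
#   unBCD(0x21), unBCD(0x30), unBCD(0x00)   # -> (21, 30, 0), i.e. 21:30:00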
class NowNextChanges(component):
"""\
Simple attempt to filter DVB now and next info for multiple services,
such that we only send output when the data changes.
"""
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
current = {}
while not self.shutdown():
while self.dataReady("inbox"):
event = self.recv("inbox")
# only interested in 'now' events, not 'next' events
if event['when'] != "NOW":
continue
uid = event['service'], event['transportstream']
if current.get(uid,None) != event:
current[uid] = event
self.send(current[uid],"outbox")
self.pause()
yield 1
class NowNextServiceFilter(component):
"""\
Filters now/next event data for only specified services.
"""
def __init__(self, *services):
super(NowNextServiceFilter,self).__init__()
self.services = services
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
event = self.recv("inbox")
if event['service'] in self.services:
self.send(event,"outbox")
self.pause()
yield 1
class TimeAndDatePacketParser(component):
"""\
Parses "Time and Date" packets.
"""
Inboxes = { "inbox" : "PES packets",
"control" : "NOT USED",
}
Outboxes = { "outbox" : "Parsed date and time",
"signal" : "NOT USED",
}
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
msg = {}
s = struct.unpack(">BHHBBB", data[:8])
table_id = s[0]
syntax = s[1] & 0x8000
section_length = s[1] & 0x0fff
data=data[:3+section_length] # remove any padding at end of table
if table_id != 0x70: # only interested in Date & Time packets
continue
if syntax:
print "wrong syntax"
continue
date = parseMJD(s[2]) # Y, M, D
time = unBCD(s[3]), unBCD(s[4]), unBCD(s[5]) # HH, MM, SS
msg['date'] = date
msg['time'] = time
self.send(msg, "outbox")
self.pause()
yield 1
__kamaelia_components__ = ( PSIPacketReconstructor, EITPacketParser, NowNextChanges, NowNextServiceFilter, TimeAndDatePacketParser, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Util.Console import ConsoleEchoer
import dvb3.frontend
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
Graphline(
SOURCE=DVB_Multiplex(505833330.0/1000000.0, [18,20,600,601], feparams),
DEMUX=DVB_Demuxer({ 18: ["_EIT_"], 20:["_DATETIME_"] }),
EIT = Pipeline( PSIPacketReconstructor(),
EITPacketParser(),
NowNextServiceFilter(4164, 4228), # BBC ONE & BBC TWO
NowNextChanges(),
ConsoleEchoer(),
),
DATETIME = Pipeline( PSIPacketReconstructor(),
TimeAndDatePacketParser(),
ConsoleEchoer(),
),
linkages={ ("SOURCE", "outbox"):("DEMUX","inbox"),
("DEMUX", "_EIT_"): ("EIT", "inbox"),
("DEMUX", "_DATETIME_"): ("DATETIME", "inbox"),
}
).run()
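# Note: the tuning values above are specific to one UK DVB-T multiplex and
# will need changing for other transmitters. PID 18 (0x12) carries the EIT
# and PID 20 (0x14) the TDT as defined by DVB-SI; the service ids 4164/4228
# identify BBC ONE and BBC TWO on that particular multiplex.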
# RELEASE: MH, MPS
|
|
"""
Read a SAS XPort format file into a Pandas DataFrame.
Based on code from Jack Cushman (github.com/jcushman/xport).
The file format is defined here:
https://support.sas.com/techsup/technote/ts140.pdf
"""
from datetime import datetime
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas import compat
import struct
import numpy as np
from pandas.util._decorators import Appender
import warnings
_correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_correct_header1 = ("HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!"
"000000000000000001600000000")
_correct_header2 = ("HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_correct_obs_header = ("HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_fieldkeys = ['ntype', 'nhfun', 'field_length', 'nvar0', 'name', 'label',
'nform', 'nfl', 'num_decimals', 'nfj', 'nfill', 'niform',
'nifl', 'nifd', 'npos', '_']
_base_params_doc = """\
Parameters
----------
filepath_or_buffer : string or file-like object
Path to SAS file or object implementing binary read method."""
_params2_doc = """\
index : identifier of index column
Identifier of column that should be used as index of the DataFrame.
encoding : string
Encoding for text data.
chunksize : int
Read file `chunksize` lines at a time, returns iterator."""
_format_params_doc = """\
format : string
File format, only `xport` is currently supported."""
_iterator_doc = """\
iterator : boolean, default False
Return XportReader object for reading file incrementally."""
_read_sas_doc = """Read a SAS file into a DataFrame.
%(_base_params_doc)s
%(_format_params_doc)s
%(_params2_doc)s
%(_iterator_doc)s
Returns
-------
DataFrame or XportReader
Examples
--------
Read a SAS Xport file:
>>> df = pd.read_sas('filename.XPT')
Read an Xport file in 10,000 line chunks:
>>> itr = pd.read_sas('filename.XPT', chunksize=10000)
>>> for chunk in itr:
...     do_something(chunk)
""" % {"_base_params_doc": _base_params_doc,
"_format_params_doc": _format_params_doc,
"_params2_doc": _params2_doc,
"_iterator_doc": _iterator_doc}
_xport_reader_doc = """\
Class for reading SAS Xport files.
%(_base_params_doc)s
%(_params2_doc)s
Attributes
----------
member_info : list
Contains information about the file
fields : list
Contains information about the variables in the file
""" % {"_base_params_doc": _base_params_doc,
"_params2_doc": _params2_doc}
_read_method_doc = """\
Read observations from SAS Xport file, returning as data frame.
Parameters
----------
nrows : int
Number of rows to read from data file; if None, read whole
file.
Returns
-------
A DataFrame.
"""
def _parse_date(datestr):
""" Given a date in xport format, return Python date. """
try:
# e.g. "16FEB11:10:07:55"
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
except ValueError:
return pd.NaT
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start + length].strip()
start += length
del out['_']
return out
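# Minimal illustration of _split_line (hypothetical input): an 8-character
# record split into one named 4-character field plus 4 characters of padding
# that are mapped to '_' and therefore dropped from the result:
#
#   _split_line("AB  CD  ", [("x", 4), ("_", 4)])   # -> {"x": "AB"}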
def _handle_truncated_float_vec(vec, nbytes):
# This feature is not well documented, but some SAS XPORT files
# have 2-7 byte "truncated" floats. To read these truncated
# floats, pad them with zeros on the right to make 8 byte floats.
#
# References:
# https://github.com/jcushman/xport/pull/3
# The R "foreign" library
if nbytes != 8:
vec1 = np.zeros(len(vec), np.dtype('S8'))
dtype = np.dtype('S%d,S%d' % (nbytes, 8 - nbytes))
vec2 = vec1.view(dtype=dtype)
vec2['f0'] = vec
return vec2
return vec
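# Illustrative sketch (assumed input): a 4-byte truncated field is zero-padded
# on the right so it can later be parsed as a full 8-byte IBM float, e.g.
#
#   vec = np.array([b"\x41\x10\x00\x00"], dtype="S4")
#   _handle_truncated_float_vec(vec, 4)   # raw bytes: 41 10 00 00 00 00 00 00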
def _parse_float_vec(vec):
"""
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
"""
dtype = np.dtype('>u4,>u4')
vec1 = vec.view(dtype=dtype)
xport1 = vec1['f0']
xport2 = vec1['f1']
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00ffffff
# Get the second half of the ibm number into the second half of
# the ieee number
ieee2 = xport2
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0, 1, 2, or 3
# places. This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude.
shift = np.zeros(len(vec), dtype=np.uint8)
shift[np.where(xport1 & 0x00200000)] = 1
shift[np.where(xport1 & 0x00400000)] = 2
shift[np.where(xport1 & 0x00800000)] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately, ored with the bits
# from the first half that would have been shifted in if we
# could shift a double. All we are worried about are the low
# order 3 bits of the first half since we're only shifting by
# 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xffefffff
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023. Or this into the first half of the
# ieee number. The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point. (had to add >> 24 because C treats &
# 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
shift + 1023) << 20) | (xport1 & 0x80000000)
ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
ieee['f0'] = ieee1
ieee['f1'] = ieee2
ieee = ieee.view(dtype='>f8')
ieee = ieee.astype('f8')
return ieee
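# A small worked example (illustrative): the IBM hex float 0x4110000000000000
# encodes 16**(0x41 - 64) * (1/16) == 1.0, and the bit manipulation above maps
# it to the IEEE-754 double 0x3FF0000000000000:
#
#   vec = np.frombuffer(b"\x41\x10\x00\x00\x00\x00\x00\x00", dtype="S8")
#   _parse_float_vec(vec)   # -> array([1.])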
class XportReader(BaseIterator):
__doc__ = _xport_reader_doc
def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
chunksize=None):
self._encoding = encoding
self._lines_read = 0
self._index = index
self._chunksize = chunksize
if isinstance(filepath_or_buffer, str):
(filepath_or_buffer, encoding,
compression, should_close) = get_filepath_or_buffer(
filepath_or_buffer, encoding=encoding)
if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = filepath_or_buffer.read()
try:
contents = contents.encode(self._encoding)
except (AttributeError, UnicodeEncodeError):  # already bytes, or not encodable with this codec
pass
self.filepath_or_buffer = compat.BytesIO(contents)
self._read_header()
def close(self):
self.filepath_or_buffer.close()
def _get_row(self):
return self.filepath_or_buffer.read(80).decode()
def _read_header(self):
self.filepath_or_buffer.seek(0)
# read file header
line1 = self._get_row()
if line1 != _correct_line1:
self.close()
raise ValueError("Header record is not an XPORT file.")
line2 = self._get_row()
fif = [['prefix', 24], ['version', 8], ['OS', 8],
['_', 24], ['created', 16]]
file_info = _split_line(line2, fif)
if file_info['prefix'] != "SAS SAS SASLIB":
self.close()
raise ValueError("Header record has invalid prefix.")
file_info['created'] = _parse_date(file_info['created'])
self.file_info = file_info
line3 = self._get_row()
file_info['modified'] = _parse_date(line3[:16])
# read member header
header1 = self._get_row()
header2 = self._get_row()
headflag1 = header1.startswith(_correct_header1)
headflag2 = (header2 == _correct_header2)
if not (headflag1 and headflag2):
self.close()
raise ValueError("Member header not found")
# usually 140, could be 135
fieldnamelength = int(header1[-5:-2])
# member info
mem = [['prefix', 8], ['set_name', 8], ['sasdata', 8],
['version', 8], ['OS', 8], ['_', 24], ['created', 16]]
member_info = _split_line(self._get_row(), mem)
mem = [['modified', 16], ['_', 16], ['label', 40], ['type', 8]]
member_info.update(_split_line(self._get_row(), mem))
member_info['modified'] = _parse_date(member_info['modified'])
member_info['created'] = _parse_date(member_info['created'])
self.member_info = member_info
# read field names
types = {1: 'numeric', 2: 'char'}
fieldcount = int(self._get_row()[54:58])
datalength = fieldnamelength * fieldcount
# round up to nearest 80
if datalength % 80:
datalength += 80 - datalength % 80
fielddata = self.filepath_or_buffer.read(datalength)
fields = []
obs_length = 0
while len(fielddata) >= fieldnamelength:
# pull data for one field
field, fielddata = (fielddata[:fieldnamelength],
fielddata[fieldnamelength:])
# rest at end gets ignored, so if field is short, pad out
# to match struct pattern below
field = field.ljust(140)
fieldstruct = struct.unpack('>hhhh8s40s8shhh2s8shhl52s', field)
field = dict(zip(_fieldkeys, fieldstruct))
del field['_']
field['ntype'] = types[field['ntype']]
fl = field['field_length']
if field['ntype'] == 'numeric' and ((fl < 2) or (fl > 8)):
self.close()
msg = "Floating field width {0} is not between 2 and 8."
raise TypeError(msg.format(fl))
for k, v in field.items():
try:
field[k] = v.strip()
except AttributeError:
pass
obs_length += field['field_length']
fields += [field]
header = self._get_row()
if not header == _correct_obs_header:
self.close()
raise ValueError("Observation header not found.")
self.fields = fields
self.record_length = obs_length
self.record_start = self.filepath_or_buffer.tell()
self.nobs = self._record_count()
self.columns = [x['name'].decode() for x in self.fields]
# Setup the dtype.
dtypel = []
for i, field in enumerate(self.fields):
dtypel.append(('s' + str(i), "S" + str(field['field_length'])))
dtype = np.dtype(dtypel)
self._dtype = dtype
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def _record_count(self):
"""
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of
the file.
Side effect: the file position is moved back to record_start.
"""
self.filepath_or_buffer.seek(0, 2)
total_records_length = (self.filepath_or_buffer.tell() -
self.record_start)
if total_records_length % 80 != 0:
warnings.warn("xport file may be corrupted")
if self.record_length > 80:
self.filepath_or_buffer.seek(self.record_start)
return total_records_length // self.record_length
self.filepath_or_buffer.seek(-80, 2)
last_card = self.filepath_or_buffer.read(80)
last_card = np.frombuffer(last_card, dtype=np.uint64)
# 2314885530818453536 == 0x2020202020202020, i.e. eight ASCII spaces
# (a blank 8-byte pad at the end of the last 80-byte card)
ix = np.flatnonzero(last_card == 2314885530818453536)
if len(ix) == 0:
tail_pad = 0
else:
tail_pad = 8 * len(ix)
self.filepath_or_buffer.seek(self.record_start)
return (total_records_length - tail_pad) // self.record_length
def get_chunk(self, size=None):
"""
Read lines from the Xport file and return them as a DataFrame.
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, the reader's `chunksize` is used
(or the whole file if no chunksize was given).
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
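# SAS encodes a "missing" numeric value in an XPORT file as a single tag byte
# (0x2E for '.', 0x5F for '._', or 0x41-0x5A for the special missings .A-.Z)
# followed by seven zero bytes; _missing_double below flags exactly that
# pattern by viewing each 8-byte field as (u1, u1, u2, u4).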
def _missing_double(self, vec):
v = vec.view(dtype='u1,u1,u2,u4')
miss = (v['f1'] == 0) & (v['f2'] == 0) & (v['f3'] == 0)
miss1 = (((v['f0'] >= 0x41) & (v['f0'] <= 0x5a)) |
(v['f0'] == 0x5f) | (v['f0'] == 0x2e))
miss &= miss1
return miss
@Appender(_read_method_doc)
def read(self, nrows=None):
if nrows is None:
nrows = self.nobs
read_lines = min(nrows, self.nobs - self._lines_read)
read_len = read_lines * self.record_length
if read_len <= 0:
self.close()
raise StopIteration
raw = self.filepath_or_buffer.read(read_len)
data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
df = pd.DataFrame(index=range(read_lines))
for j, x in enumerate(self.columns):
vec = data['s%d' % j]
ntype = self.fields[j]['ntype']
if ntype == "numeric":
vec = _handle_truncated_float_vec(
vec, self.fields[j]['field_length'])
miss = self._missing_double(vec)
v = _parse_float_vec(vec)
v[miss] = np.nan
elif self.fields[j]['ntype'] == 'char':
v = [y.rstrip() for y in vec]
if compat.PY3:
if self._encoding is not None:
v = [y.decode(self._encoding) for y in v]
df[x] = v
if self._index is None:
df.index = range(self._lines_read, self._lines_read + read_lines)
else:
df = df.set_index(self._index)
self._lines_read += read_lines
return df
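if __name__ == "__main__":
    # Minimal usage sketch, not part of the library proper: "example.xpt" is a
    # hypothetical local SAS transport file. Normal callers go through
    # pd.read_sas(..., format='xport') instead of using XportReader directly.
    reader = XportReader("example.xpt", chunksize=1000)
    try:
        for chunk in reader:
            print(chunk.shape)
    finally:
        reader.close()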
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util as vutil
import six
from nova.compute import power_state
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova import version
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
class DsPathMatcher(object):
def __init__(self, expected_ds_path_str):
self.expected_ds_path_str = expected_ds_path_str
def __eq__(self, ds_path_param):
return str(ds_path_param) == self.expected_ds_path_str
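# DsPathMatcher lets the tests below compare DatastorePath objects against an
# expected string form inside mock assertions; a typical (hypothetical) use:
#
#   mock_disk_copy.assert_called_with(
#       mock.ANY, mock.ANY, DsPathMatcher('[fake_ds] fake_uuid/disk.vmdk'))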
class VMwareVMOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
self.flags(image_cache_subdirectory_name='vmware_base',
my_ip='',
flat_injected=True,
vnc_enabled=True)
self._context = context.RequestContext('fake_user', 'fake_project')
self._session = driver.VMwareAPISession()
self._virtapi = mock.Mock()
self._image_id = nova.tests.unit.image.fake.get_valid_image_id()
fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
self._ds = ds_obj.Datastore(
ref=fake_ds_ref, name='fake_ds',
capacity=10 * units.Gi,
freespace=10 * units.Gi)
self._dc_info = vmops.DcInfo(
ref='fake_dc_ref', name='fake_dc',
vmFolder='fake_vm_folder')
cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref)
self._instance_values = {
'display_name': 'fake_display_name',
'name': 'fake_name',
'uuid': 'fake_uuid',
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
'root_gb': 10,
'node': '%s(%s)' % (cluster.mo_id, cluster.name),
'expected_attrs': ['system_metadata'],
}
self._instance = fake_instance.fake_instance_obj(
self._context, **self._instance_values)
self._flavor = objects.Flavor(name='m1.small', memory_mb=512, vcpus=1,
root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={})
self._instance.flavor = self._flavor
self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None,
cluster=cluster.obj)
self._cluster = cluster
subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
dns=[network_model.IP('192.168.0.1')],
gateway=
network_model.IP('192.168.0.1'),
ips=[
network_model.IP('192.168.0.100')],
routes=None)
subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
dns=None,
gateway=
network_model.IP('dead:beef::1'),
ips=[network_model.IP(
'dead:beef::dcad:beff:feef:0')],
routes=None)
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_4, subnet_6],
vlan=None,
bridge_interface=None,
injected=True)
self._network_values = {
'id': None,
'address': 'DE:AD:BE:EF:00:00',
'network': network,
'type': None,
'devname': None,
'ovs_interfaceid': None,
'rxtx_cap': 3
}
self.network_info = network_model.NetworkInfo([
network_model.VIF(**self._network_values)
])
pure_IPv6_network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_6],
vlan=None,
bridge_interface=None,
injected=True)
self.pure_IPv6_network_info = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=pure_IPv6_network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])
self._metadata = (
"name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.micro\n"
"flavor:memory_mb:6\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
"flavor:swap:33550336\n"
"imageid:70a599e0-31e7-49b7-b260-868f441e862b\n"
"package:%s\n" % version.version_string_with_package())
def test_get_machine_id_str(self):
result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
'192.168.0.1;192.168.0.255;192.168.0.1#', result)
result = vmops.VMwareVMOps._get_machine_id_str(
self.pure_IPv6_network_info)
self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
def _setup_create_folder_mocks(self):
ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
base_name = 'folder'
ds_name = "datastore"
ds_ref = mock.Mock()
ds_ref.value = 1
dc_ref = mock.Mock()
ops._datastore_dc_mapping[ds_ref.value] = vmops.DcInfo(
ref=dc_ref,
name='fake-name',
vmFolder='fake-folder')
path = ds_obj.DatastorePath(ds_name, base_name)
return ds_name, ds_ref, ops, path, dc_ref
@mock.patch.object(ds_util, 'mkdir')
def test_create_folder_if_missing(self, mock_mkdir):
ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
mock_mkdir.assert_called_with(ops._session, path, dc)
@mock.patch.object(ds_util, 'mkdir')
def test_create_folder_if_missing_exception(self, mock_mkdir):
ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
mock_mkdir.assert_called_with(ops._session, path, dc)
@mock.patch.object(vutil, 'continue_retrieval', return_value=None)
def test_get_valid_vms_from_retrieve_result(self, _mock_cont):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
fake_objects.add_object(vmwareapi_fake.VirtualMachine(
name=uuidutils.generate_uuid()))
fake_objects.add_object(vmwareapi_fake.VirtualMachine(
name=uuidutils.generate_uuid()))
fake_objects.add_object(vmwareapi_fake.VirtualMachine(
name=uuidutils.generate_uuid()))
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
self.assertEqual(3, len(vms))
@mock.patch.object(vutil, 'continue_retrieval', return_value=None)
def test_get_valid_vms_from_retrieve_result_with_invalid(self,
_mock_cont):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
fake_objects.add_object(vmwareapi_fake.VirtualMachine(
name=uuidutils.generate_uuid()))
invalid_vm1 = vmwareapi_fake.VirtualMachine(
name=uuidutils.generate_uuid())
invalid_vm1.set('runtime.connectionState', 'orphaned')
invalid_vm2 = vmwareapi_fake.VirtualMachine(
name=uuidutils.generate_uuid())
invalid_vm2.set('runtime.connectionState', 'inaccessible')
fake_objects.add_object(invalid_vm1)
fake_objects.add_object(invalid_vm2)
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
self.assertEqual(1, len(vms))
def test_delete_vm_snapshot(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('RemoveSnapshot_Task', method)
self.assertEqual('fake_vm_snapshot', args[0])
self.assertFalse(kwargs['removeChildren'])
self.assertTrue(kwargs['consolidate'])
return 'fake_remove_snapshot_task'
with contextlib.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method', fake_call_method)
) as (_wait_for_task, _call_method):
self._vmops._delete_vm_snapshot(self._instance,
"fake_vm_ref", "fake_vm_snapshot")
_wait_for_task.assert_has_calls([
mock.call('fake_remove_snapshot_task')])
def test_create_vm_snapshot(self):
method_list = ['CreateSnapshot_Task', 'get_dynamic_property']
def fake_call_method(module, method, *args, **kwargs):
expected_method = method_list.pop(0)
self.assertEqual(expected_method, method)
if (expected_method == 'CreateSnapshot_Task'):
self.assertEqual('fake_vm_ref', args[0])
self.assertFalse(kwargs['memory'])
self.assertTrue(kwargs['quiesce'])
return 'fake_snapshot_task'
elif (expected_method == 'get_dynamic_property'):
task_info = mock.Mock()
task_info.result = "fake_snapshot_ref"
self.assertEqual(('fake_snapshot_task', 'Task', 'info'), args)
return task_info
with contextlib.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method', fake_call_method)
) as (_wait_for_task, _call_method):
snap = self._vmops._create_vm_snapshot(self._instance,
"fake_vm_ref")
self.assertEqual("fake_snapshot_ref", snap)
_wait_for_task.assert_has_calls([
mock.call('fake_snapshot_task')])
def test_update_instance_progress(self):
with mock.patch.object(self._instance, 'save') as mock_save:
self._vmops._update_instance_progress(self._instance._context,
self._instance, 5, 10)
mock_save.assert_called_once_with()
self.assertEqual(50, self._instance.progress)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info(self, mock_get_vm_ref):
props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
'runtime.powerState']
prop_cpu = vmwareapi_fake.Prop(props[0], 4)
prop_mem = vmwareapi_fake.Prop(props[1], 128)
prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
prop_list = [prop_state, prop_mem, prop_cpu]
obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
result = vmwareapi_fake.FakeRetrieveResult()
result.add_object(obj_content)
def mock_call_method(module, method, *args, **kwargs):
if method == 'continue_retrieval':
return
return result
with mock.patch.object(self._session, '_call_method',
mock_call_method):
info = self._vmops.get_info(self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
expected = hardware.InstanceInfo(state=power_state.RUNNING,
max_mem_kb=128 * 1024,
mem_kb=128 * 1024,
num_cpu=4)
self.assertEqual(expected, info)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info_when_ds_unavailable(self, mock_get_vm_ref):
props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
'runtime.powerState']
prop_state = vmwareapi_fake.Prop(props[2], 'poweredOff')
# when the VM's datastore is not available, only the power state can be retrieved
prop_list = [prop_state]
obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
result = vmwareapi_fake.FakeRetrieveResult()
result.add_object(obj_content)
def mock_call_method(module, method, *args, **kwargs):
if method == 'continue_retrieval':
return
return result
with mock.patch.object(self._session, '_call_method',
mock_call_method):
info = self._vmops.get_info(self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
self.assertEqual(hardware.InstanceInfo(state=power_state.SHUTDOWN),
info)
def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
instance_ds_ref = mock.Mock()
instance_ds_ref.value = "ds-1"
_vcvmops = vmops.VMwareVMOps(self._session, None, None)
if ds_ref_exists:
ds_ref = mock.Mock()
ds_ref.value = "ds-1"
else:
ds_ref = None
self._continue_retrieval = True
self._fake_object1 = vmwareapi_fake.FakeRetrieveResult()
self._fake_object2 = vmwareapi_fake.FakeRetrieveResult()
def fake_call_method(module, method, *args, **kwargs):
self._fake_object1.add_object(vmwareapi_fake.Datacenter(
ds_ref=ds_ref))
if not ds_ref:
# Token is set for the fake_object1, so it will continue to
# fetch the next object.
setattr(self._fake_object1, 'token', 'token-0')
if self._continue_retrieval:
if self._continue_retrieval:
self._continue_retrieval = False
self._fake_object2.add_object(
vmwareapi_fake.Datacenter())
return self._fake_object2
return
if method == "continue_retrieval":
return
return self._fake_object1
with mock.patch.object(self._session, '_call_method',
side_effect=fake_call_method) as fake_call:
dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
if ds_ref:
self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
calls = [mock.call(vim_util, "get_objects", "Datacenter",
["name", "datastore", "vmFolder"]),
mock.call(vutil, 'continue_retrieval',
self._fake_object1)]
fake_call.assert_has_calls(calls)
self.assertEqual("ha-datacenter", dc_info.name)
else:
calls = [mock.call(vim_util, "get_objects", "Datacenter",
["name", "datastore", "vmFolder"]),
mock.call(vutil, 'continue_retrieval',
self._fake_object2)]
fake_call.assert_has_calls(calls)
self.assertIsNone(dc_info)
def test_get_datacenter_ref_and_name(self):
self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
def test_get_datacenter_ref_and_name_with_no_datastore(self):
self._test_get_datacenter_ref_and_name()
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(ds_util, 'disk_copy')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
@mock.patch.object(vm_util, 'get_values_from_object_properties')
@mock.patch.object(vm_util, 'find_rescue_device')
@mock.patch.object(vm_util, 'get_vm_boot_spec')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'power_on_instance')
@mock.patch.object(ds_obj, 'get_datastore_by_ref')
def test_rescue(self, mock_get_ds_by_ref, mock_power_on, mock_reconfigure,
mock_get_boot_spec, mock_find_rescue,
mock_get_values, mock_get_vm_ref, mock_disk_copy,
mock_power_off):
_volumeops = mock.Mock()
self._vmops._volumeops = _volumeops
ds = ds_obj.Datastore('fake-ref', 'ds1')
mock_get_ds_by_ref.return_value = ds
mock_find_rescue.return_value = 'fake-rescue-device'
mock_get_boot_spec.return_value = 'fake-boot-spec'
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = ds.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
with contextlib.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (_get_dc_ref_and_name, fake_vmdk_info):
dc_info = mock.Mock()
_get_dc_ref_and_name.return_value = dc_info
self._vmops.rescue(self._context, self._instance, None, None)
mock_power_off.assert_called_once_with(self._session,
self._instance,
'fake-ref')
uuid = self._instance.image_ref
cache_path = ds.build_path('vmware_base', uuid, uuid + '.vmdk')
rescue_path = ds.build_path('fake_uuid', uuid + '-rescue.vmdk')
mock_disk_copy.assert_called_once_with(self._session, dc_info.ref,
cache_path, rescue_path)
_volumeops.attach_disk_to_vm.assert_called_once_with('fake-ref',
self._instance, mock.ANY, mock.ANY, rescue_path)
mock_get_boot_spec.assert_called_once_with(mock.ANY,
'fake-rescue-device')
mock_reconfigure.assert_called_once_with(self._session,
'fake-ref',
'fake-boot-spec')
mock_power_on.assert_called_once_with(self._session,
self._instance,
vm_ref='fake-ref')
def test_unrescue_power_on(self):
self._test_unrescue(True)
def test_unrescue_power_off(self):
self._test_unrescue(False)
def _test_unrescue(self, power_on):
_volumeops = mock.Mock()
self._vmops._volumeops = _volumeops
vm_ref = mock.Mock()
def fake_call_method(module, method, *args, **kwargs):
expected_args = (vm_ref, 'VirtualMachine',
'config.hardware.device')
self.assertEqual('get_dynamic_property', method)
self.assertEqual(expected_args, args)
with contextlib.nested(
mock.patch.object(vm_util, 'power_on_instance'),
mock.patch.object(vm_util, 'find_rescue_device'),
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._session, '_call_method',
fake_call_method),
mock.patch.object(vm_util, 'power_off_instance')
) as (_power_on_instance, _find_rescue, _get_vm_ref,
_call_method, _power_off):
self._vmops.unrescue(self._instance, power_on=power_on)
if power_on:
_power_on_instance.assert_called_once_with(self._session,
self._instance, vm_ref=vm_ref)
else:
self.assertFalse(_power_on_instance.called)
_get_vm_ref.assert_called_once_with(self._session,
self._instance)
_power_off.assert_called_once_with(self._session, self._instance,
vm_ref)
_volumeops.detach_disk_from_vm.assert_called_once_with(
vm_ref, self._instance, mock.ANY, destroy_disk=True)
def _test_finish_migration(self, power_on=True, resize_instance=False):
with contextlib.nested(
mock.patch.object(self._vmops, '_resize_create_ephemerals'),
mock.patch.object(self._vmops, "_update_instance_progress"),
mock.patch.object(vm_util, "power_on_instance"),
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-ref')
) as (fake_resize_create_ephemerals, fake_update_instance_progress,
fake_power_on, fake_get_vm_ref):
self._vmops.finish_migration(context=self._context,
migration=None,
instance=self._instance,
disk_info=None,
network_info=None,
block_device_info=None,
resize_instance=resize_instance,
image_meta=None,
power_on=power_on)
fake_resize_create_ephemerals.called_once_with('fake-ref',
self._instance,
None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
self._instance,
vm_ref='fake-ref')
else:
self.assertFalse(fake_power_on.called)
calls = [
mock.call(self._context, self._instance, step=5,
total_steps=vmops.RESIZE_TOTAL_STEPS),
mock.call(self._context, self._instance, step=6,
total_steps=vmops.RESIZE_TOTAL_STEPS)]
fake_update_instance_progress.assert_has_calls(calls)
def test_finish_migration_power_on(self):
self._test_finish_migration(power_on=True, resize_instance=False)
def test_finish_migration_power_off(self):
self._test_finish_migration(power_on=False, resize_instance=False)
def test_finish_migration_power_on_resize(self):
self._test_finish_migration(power_on=True, resize_instance=True)
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vmops.VMwareVMOps, '_resize_create_ephemerals')
@mock.patch.object(vmops.VMwareVMOps, '_remove_ephemerals')
@mock.patch.object(ds_util, 'disk_delete')
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'file_exists',
return_value=True)
@mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
return_value='fake-browser')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_vm_resize_spec',
return_value='fake-spec')
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
@mock.patch.object(vm_util, 'power_on_instance')
def _test_finish_revert_migration(self, fake_power_on,
fake_get_vm_ref, fake_power_off,
fake_resize_spec, fake_reconfigure_vm,
fake_get_browser,
fake_original_exists, fake_disk_move,
fake_disk_delete,
fake_remove_ephemerals,
fake_resize_create_ephemerals,
fake_get_extra_specs,
power_on):
"""Tests the finish_revert_migration method on vmops."""
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
extra_specs = vm_util.ExtraSpecs()
fake_get_extra_specs.return_value = extra_specs
with contextlib.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
self._vmops._volumeops = mock.Mock()
mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm
self._vmops.finish_revert_migration(self._context,
instance=self._instance,
network_info=None,
block_device_info=None,
power_on=power_on)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_power_off.assert_called_once_with(self._session,
self._instance,
'fake-ref')
# Validate VM reconfiguration
metadata = ('name:fake_display_name\n'
'userid:fake_user\n'
'username:None\n'
'projectid:fake_project\n'
'projectname:None\n'
'flavor:name:m1.small\n'
'flavor:memory_mb:512\n'
'flavor:vcpus:1\n'
'flavor:ephemeral_gb:0\n'
'flavor:root_gb:10\n'
'flavor:swap:0\n'
'imageid:70a599e0-31e7-49b7-b260-868f441e862b\n'
'package:%s\n' % version.version_string_with_package())
fake_resize_spec.assert_called_once_with(
self._session.vim.client.factory,
int(self._instance.vcpus),
int(self._instance.memory_mb),
extra_specs,
metadata=metadata)
fake_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-spec')
# Validate disk configuration
fake_get_vmdk_info.assert_called_once_with(
self._session, 'fake-ref', uuid=self._instance.uuid)
fake_get_browser.assert_called_once_with('fake-ref')
fake_original_exists.assert_called_once_with(
self._session, 'fake-browser',
ds_obj.DatastorePath(datastore.name, 'uuid'),
'original.vmdk')
mock_detach_disk.assert_called_once_with('fake-ref',
self._instance,
device)
fake_disk_delete.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/root.vmdk')
fake_disk_move.assert_called_once_with(
self._session, dc_info.ref,
'[fake] uuid/original.vmdk',
'[fake] uuid/root.vmdk')
mock_attach_disk.assert_called_once_with(
'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
'[fake] uuid/root.vmdk')
fake_remove_ephemerals.called_once_with('fake-ref')
fake_resize_create_ephemerals.called_once_with('fake-ref',
self._instance,
None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
self._instance)
else:
self.assertFalse(fake_power_on.called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(power_on=True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(power_on=False)
@mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_vm_resize_spec',
return_value='fake-spec')
def test_resize_vm(self, fake_resize_spec, fake_reconfigure,
fake_get_extra_specs, fake_get_metadata):
extra_specs = vm_util.ExtraSpecs()
fake_get_extra_specs.return_value = extra_specs
fake_get_metadata.return_value = self._metadata
flavor = objects.Flavor(name='m1.small',
memory_mb=1024,
vcpus=2,
extra_specs={})
self._vmops._resize_vm(self._context, self._instance, 'vm-ref', flavor)
fake_resize_spec.assert_called_once_with(
self._session.vim.client.factory, 2, 1024, extra_specs,
metadata=self._metadata)
fake_reconfigure.assert_called_once_with(self._session,
'vm-ref', 'fake-spec')
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'disk_copy')
def test_resize_disk(self, fake_disk_copy, fake_disk_move,
fake_extend):
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
self._instance.root_gb * units.Gi,
device)
dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info) as fake_get_dc_ref_and_name:
self._vmops._volumeops = mock.Mock()
mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm
flavor = {'root_gb': self._instance.root_gb + 1}
self._vmops._resize_disk(self._instance, 'fake-ref', vmdk, flavor)
fake_get_dc_ref_and_name.assert_called_once_with(datastore.ref)
fake_disk_copy.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/root.vmdk',
'[fake] uuid/resized.vmdk')
mock_detach_disk.assert_called_once_with('fake-ref',
self._instance,
device)
fake_extend.assert_called_once_with(
self._instance, flavor['root_gb'] * units.Mi,
'[fake] uuid/resized.vmdk', dc_info.ref)
calls = [
mock.call(self._session, dc_info.ref,
'[fake] uuid/root.vmdk',
'[fake] uuid/original.vmdk'),
mock.call(self._session, dc_info.ref,
'[fake] uuid/resized.vmdk',
'[fake] uuid/root.vmdk')]
fake_disk_move.assert_has_calls(calls)
mock_attach_disk.assert_called_once_with(
'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
'[fake] uuid/root.vmdk')
@mock.patch.object(ds_util, 'disk_delete')
@mock.patch.object(ds_util, 'file_exists',
return_value=True)
@mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
return_value='fake-browser')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_confirm_migration(self, fake_get_vm_ref, fake_get_browser,
fake_original_exists,
fake_disk_delete):
"""Tests the confirm_migration method on vmops."""
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with contextlib.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
self._vmops.confirm_migration(None,
self._instance,
None)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_get_vmdk_info.assert_called_once_with(
self._session, 'fake-ref', uuid=self._instance.uuid)
fake_get_browser.assert_called_once_with('fake-ref')
fake_original_exists.assert_called_once_with(
self._session, 'fake-browser',
ds_obj.DatastorePath(datastore.name, 'uuid'),
'original.vmdk')
fake_disk_delete.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/original.vmdk')
def test_migrate_disk_and_power_off(self):
self._test_migrate_disk_and_power_off(
flavor_root_gb=self._instance.root_gb + 1)
def test_migrate_disk_and_power_off_zero_disk_flavor(self):
self._instance.root_gb = 0
self._test_migrate_disk_and_power_off(flavor_root_gb=0)
def test_migrate_disk_and_power_off_disk_shrink(self):
self.assertRaises(exception.InstanceFaultRollback,
self._test_migrate_disk_and_power_off,
flavor_root_gb=self._instance.root_gb - 1)
@mock.patch.object(vmops.VMwareVMOps, "_remove_ephemerals")
@mock.patch.object(vm_util, 'get_vmdk_info')
@mock.patch.object(vmops.VMwareVMOps, "_resize_disk")
@mock.patch.object(vmops.VMwareVMOps, "_resize_vm")
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(vmops.VMwareVMOps, "_update_instance_progress")
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def _test_migrate_disk_and_power_off(self, fake_get_vm_ref, fake_progress,
fake_power_off, fake_resize_vm,
fake_resize_disk, fake_get_vmdk_info,
fake_remove_ephemerals,
flavor_root_gb):
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
self._instance.root_gb * units.Gi,
'fake-device')
fake_get_vmdk_info.return_value = vmdk
flavor = {'root_gb': flavor_root_gb}
self._vmops.migrate_disk_and_power_off(self._context,
self._instance,
None,
flavor)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_power_off.assert_called_once_with(self._session,
self._instance,
'fake-ref')
fake_resize_vm.assert_called_once_with(self._context, self._instance,
'fake-ref', flavor)
fake_resize_disk.assert_called_once_with(self._instance, 'fake-ref',
vmdk, flavor)
calls = [mock.call(self._context, self._instance, step=i,
total_steps=vmops.RESIZE_TOTAL_STEPS)
for i in range(4)]
fake_progress.assert_has_calls(calls)
@mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
def test_configure_config_drive(self,
mock_create_config_drive,
mock_attach_cdrom_to_vm):
injected_files = mock.Mock()
admin_password = mock.Mock()
vm_ref = mock.Mock()
mock_create_config_drive.return_value = "fake_iso_path"
self._vmops._configure_config_drive(
self._instance, vm_ref, self._dc_info, self._ds,
injected_files, admin_password)
upload_iso_path = self._ds.build_path("fake_iso_path")
mock_create_config_drive.assert_called_once_with(self._instance,
injected_files, admin_password, self._ds.name,
self._dc_info.name, self._instance.uuid, "Fake-CookieJar")
mock_attach_cdrom_to_vm.assert_called_once_with(
vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
@mock.patch.object(vmops.LOG, 'debug')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.lockutils, 'lock')
def test_spawn_mask_block_device_info_password(self, mock_lock,
mock_build_virtual_machine, mock_get_vm_config_info,
mock_fetch_image_if_missing, mock_debug):
# Very simple test that just ensures block_device_info auth_password
# is masked when logged; the rest of the test just fails out early.
data = {'auth_password': 'scrubme'}
bdm = [{'boot_index': 0, 'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
'connection_info': {'data': data}}]
bdi = {'block_device_mapping': bdm}
self.password_logged = False
# Tests that the parameters to the to_xml method are sanitized for
# passwords when logged.
def fake_debug(*args, **kwargs):
if 'auth_password' in args[0]:
self.password_logged = True
self.assertNotIn('scrubme', args[0])
mock_debug.side_effect = fake_debug
self.flags(flat_injected=False, vnc_enabled=False)
# Call spawn(). We don't care what it does as long as it generates
# the log message, which we check below.
with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
mock_vo.attach_root_volume.side_effect = test.TestingException
try:
self._vmops.spawn(
self._context, self._instance, {},
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi
)
except test.TestingException:
pass
# Check that the relevant log message was generated, and therefore
# that we checked it was scrubbed
self.assertTrue(self.password_logged)
def _get_metadata(self, is_image_used=True):
if is_image_used:
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
else:
image_id = None
return ("name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
"flavor:memory_mb:512\n"
"flavor:vcpus:1\n"
"flavor:ephemeral_gb:0\n"
"flavor:root_gb:10\n"
"flavor:swap:0\n"
"imageid:%(image_id)s\n"
"package:%(version)s\n" % {
'image_id': image_id,
'version': version.version_string_with_package()})
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_non_root_block_device(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
enlist_image, fetch_image,
use_disk_image,
power_on_instance):
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
bdm = [{'connection_info': connection_info1,
'disk_bus': constants.ADAPTER_TYPE_IDE,
'mount_device': '/dev/sdb'},
{'connection_info': connection_info2,
'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
'mount_device': '/dev/sdc'}]
bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
self.flags(flat_injected=False, vnc_enabled=False)
image_size = (self._instance.root_gb) * units.Gi / 2
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
with mock.patch.object(self._vmops, '_volumeops') as volumeops:
self._vmops.spawn(self._context, self._instance, {},
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(self._instance.image_ref, {})
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs.storage_policy)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata())
enlist_image.assert_called_once_with(image_info.image_id,
vi.datastore, vi.dc_info.ref)
fetch_image.assert_called_once_with(self._context, vi)
use_disk_image.assert_called_once_with('fake-vm-ref', vi)
volumeops.attach_volume.assert_any_call(
connection_info1, self._instance, constants.ADAPTER_TYPE_IDE)
volumeops.attach_volume.assert_any_call(
connection_info2, self._instance,
constants.DEFAULT_ADAPTER_TYPE)
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_with_no_image_and_block_devices(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
power_on_instance):
self._instance.image_ref = None
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
connection_info3 = {'data': 'fake-data3', 'serial': 'volume-fake-id3'}
bdm = [{'boot_index': 0,
'connection_info': connection_info1,
'disk_bus': constants.ADAPTER_TYPE_IDE},
{'boot_index': 1,
'connection_info': connection_info2,
'disk_bus': constants.DEFAULT_ADAPTER_TYPE},
{'boot_index': 2,
'connection_info': connection_info3,
'disk_bus': constants.ADAPTER_TYPE_LSILOGICSAS}]
bdi = {'block_device_mapping': bdm}
self.flags(flat_injected=False, vnc_enabled=False)
image_info = mock.sentinel.image_info
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
with mock.patch.object(self._vmops, '_volumeops') as volumeops:
self._vmops.spawn(self._context, self._instance, {},
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(self._instance.image_ref, {})
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs.storage_policy)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata(is_image_used=False))
volumeops.attach_root_volume.assert_called_once_with(
connection_info1, self._instance, vi.datastore.ref,
constants.ADAPTER_TYPE_IDE)
volumeops.attach_volume.assert_any_call(
connection_info2, self._instance,
constants.DEFAULT_ADAPTER_TYPE)
volumeops.attach_volume.assert_any_call(
connection_info3, self._instance,
constants.ADAPTER_TYPE_LSILOGICSAS)
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_unsupported_hardware(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
power_on_instance):
self._instance.image_ref = None
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
bdm = [{'boot_index': 0,
'connection_info': connection_info,
'disk_bus': 'invalid_adapter_type'}]
bdi = {'block_device_mapping': bdm}
self.flags(flat_injected=False, vnc_enabled=False)
image_info = mock.sentinel.image_info
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
self.assertRaises(exception.UnsupportedHardware, self._vmops.spawn,
self._context, self._instance, {},
injected_files=None,
admin_password=None, network_info=[],
block_device_info=bdi)
from_image.assert_called_once_with(self._instance.image_ref, {})
get_vm_config_info.assert_called_once_with(
self._instance, image_info, extra_specs.storage_policy)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata(is_image_used=False))
def test_get_ds_browser(self):
cache = self._vmops._datastore_browser_mapping
ds_browser = mock.Mock()
moref = vmwareapi_fake.ManagedObjectReference('datastore-100')
self.assertIsNone(cache.get(moref.value))
mock_call_method = mock.Mock(return_value=ds_browser)
with mock.patch.object(self._session, '_call_method',
mock_call_method):
ret = self._vmops._get_ds_browser(moref)
mock_call_method.assert_called_once_with(vim_util,
'get_dynamic_property', moref, 'Datastore', 'browser')
self.assertIs(ds_browser, ret)
self.assertIs(ds_browser, cache.get(moref.value))
@mock.patch.object(
vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(vm_util, 'copy_virtual_disk')
def _test_use_disk_image_as_linked_clone(self,
mock_copy_virtual_disk,
mock_extend_virtual_disk,
mock_sized_image_exists,
flavor_fits_image=False):
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache)
sized_cached_image_ds_loc = cache_root_folder.join(
"%s.%s.vmdk" % (self._image_id, vi.root_gb))
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
str(vi.cache_image_path),
str(sized_cached_image_ds_loc))
if not flavor_fits_image:
mock_extend_virtual_disk.assert_called_once_with(
self._instance, vi.root_gb * units.Mi,
str(sized_cached_image_ds_loc),
self._dc_info.ref)
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance, vi.ii.adapter_type,
vi.ii.disk_type,
str(sized_cached_image_ds_loc),
vi.root_gb * units.Mi, False)
def test_use_disk_image_as_linked_clone(self):
self._test_use_disk_image_as_linked_clone()
def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(vm_util, 'copy_virtual_disk')
def _test_use_disk_image_as_full_clone(self,
mock_copy_virtual_disk,
mock_extend_virtual_disk,
flavor_fits_image=False):
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache)
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
str(vi.cache_image_path),
'[fake_ds] fake_uuid/fake_uuid.vmdk')
if not flavor_fits_image:
mock_extend_virtual_disk.assert_called_once_with(
self._instance, vi.root_gb * units.Mi,
'[fake_ds] fake_uuid/fake_uuid.vmdk', self._dc_info.ref)
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance, vi.ii.adapter_type,
vi.ii.disk_type, '[fake_ds] fake_uuid/fake_uuid.vmdk',
vi.root_gb * units.Mi, False)
def test_use_disk_image_as_full_clone(self):
self._test_use_disk_image_as_full_clone()
def test_use_disk_image_as_full_clone_image_too_big(self):
self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
@mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
@mock.patch.object(vm_util, 'create_virtual_disk')
def _test_use_iso_image(self,
mock_create_virtual_disk,
mock_attach_cdrom,
with_root_disk):
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
linked_clone=True)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache)
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_iso_image("fake_vm_ref", vi)
mock_attach_cdrom.assert_called_once_with(
"fake_vm_ref", self._instance, self._ds.ref,
str(vi.cache_image_path))
if with_root_disk:
mock_create_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
vi.ii.adapter_type, vi.ii.disk_type,
'[fake_ds] fake_uuid/fake_uuid.vmdk',
vi.root_gb * units.Mi)
linked_clone = False
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance,
vi.ii.adapter_type, vi.ii.disk_type,
'[fake_ds] fake_uuid/fake_uuid.vmdk',
vi.root_gb * units.Mi, linked_clone)
def test_use_iso_image_with_root_disk(self):
self._test_use_iso_image(with_root_disk=True)
def test_use_iso_image_without_root_disk(self):
self._test_use_iso_image(with_root_disk=False)
def _verify_spawn_method_calls(self, mock_call_method, extras=None):
# TODO(vui): More explicit assertions of spawn() behavior
# are waiting on additional refactoring pertaining to image
# handling/manipulation. Till then, we continue to assert on the
# sequence of VIM operations invoked.
expected_methods = ['get_dynamic_property',
'SearchDatastore_Task',
'CreateVirtualDisk_Task',
'DeleteDatastoreFile_Task',
'MoveDatastoreFile_Task',
'DeleteDatastoreFile_Task',
'SearchDatastore_Task',
'ExtendVirtualDisk_Task',
]
if extras:
expected_methods.extend(extras)
recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
self.assertEqual(expected_methods, recorded_methods)
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
@mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
@mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
return_value=[])
@mock.patch('nova.utils.is_neutron',
return_value=False)
@mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
return_value='fake_create_spec')
@mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
return_value='fake_vm_ref')
@mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
@mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
# TODO(dims): Need to add tests for create_virtual_disk after the
# disk/image code in spawn gets refactored
def _test_spawn(self,
mock_copy_virtual_disk,
mock_power_on_instance,
mock_get_and_set_vnc_config,
mock_enlist_image,
mock_set_machine_id,
mock_mkdir,
mock_create_vm,
mock_get_create_spec,
mock_is_neutron,
mock_get_vif_info,
mock_get_datacenter_ref_and_name,
mock_get_datastore,
mock_configure_config_drive,
block_device_info=None,
extra_specs=None,
config_drive=False):
image_size = (self._instance.root_gb) * units.Gi / 2
image = {
'id': self._image_id,
'disk_format': 'vmdk',
'size': image_size,
}
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = self._vmops._get_vm_config_info(
self._instance, image_info)
self._vmops._volumeops = mock.Mock()
network_info = mock.Mock()
mock_get_datastore.return_value = self._ds
mock_get_datacenter_ref_and_name.return_value = self._dc_info
mock_call_method = mock.Mock(return_value='fake_task')
if extra_specs is None:
extra_specs = vm_util.ExtraSpecs()
with contextlib.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method',
mock_call_method),
mock.patch.object(uuidutils, 'generate_uuid',
return_value='tmp-uuid'),
mock.patch.object(images, 'fetch_image'),
mock.patch.object(self._vmops, '_get_extra_specs',
return_value=extra_specs),
mock.patch.object(self._vmops, '_get_instance_metadata',
return_value='fake-metadata')
) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image,
_get_extra_specs, _get_instance_metadata):
self._vmops.spawn(self._context, self._instance, image,
injected_files='fake_files',
admin_password='password',
network_info=network_info,
block_device_info=block_device_info)
mock_is_neutron.assert_called_once_with()
self.assertEqual(2, mock_mkdir.call_count)
mock_get_vif_info.assert_called_once_with(
self._session, self._cluster.obj, False,
constants.DEFAULT_VIF_MODEL, network_info)
mock_get_create_spec.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
'fake_ds',
[],
extra_specs,
'otherGuest',
profile_spec=None,
metadata='fake-metadata')
mock_create_vm.assert_called_once_with(
self._session,
self._instance,
'fake_vm_folder',
'fake_create_spec',
self._cluster.resourcePool)
mock_get_and_set_vnc_config.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
'fake_vm_ref')
mock_set_machine_id.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
network_info,
vm_ref='fake_vm_ref')
mock_power_on_instance.assert_called_once_with(
self._session, self._instance, vm_ref='fake_vm_ref')
if (block_device_info and
'block_device_mapping' in block_device_info):
bdms = block_device_info['block_device_mapping']
for bdm in bdms:
mock_attach_root = (
self._vmops._volumeops.attach_root_volume)
mock_attach = self._vmops._volumeops.attach_volume
adapter_type = bdm.get('disk_bus') or vi.ii.adapter_type
if bdm.get('boot_index') == 0:
mock_attach_root.assert_any_call(
bdm['connection_info'], self._instance,
self._ds.ref, adapter_type)
else:
mock_attach.assert_any_call(
bdm['connection_info'], self._instance,
self._ds.ref, adapter_type)
mock_enlist_image.assert_called_once_with(
self._image_id, self._ds, self._dc_info.ref)
upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
self._image_id, self._image_id)
_fetch_image.assert_called_once_with(
self._context,
self._instance,
self._session._host,
self._session._port,
self._dc_info.name,
self._ds.name,
upload_file_name,
cookies='Fake-CookieJar')
self.assertTrue(len(_wait_for_task.mock_calls) > 0)
extras = None
if block_device_info and 'ephemerals' in block_device_info:
extras = ['CreateVirtualDisk_Task']
self._verify_spawn_method_calls(_call_method, extras)
dc_ref = 'fake_dc_ref'
source_file = six.text_type('[fake_ds] vmware_base/%s/%s.vmdk' %
(self._image_id, self._image_id))
dest_file = six.text_type('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
(self._image_id, self._image_id,
self._instance['root_gb']))
# TODO(dims): add more tests for copy_virtual_disk after
# the disk/image code in spawn gets refactored
mock_copy_virtual_disk.assert_called_with(self._session,
dc_ref,
source_file,
dest_file)
if config_drive:
mock_configure_config_drive.assert_called_once_with(
self._instance, 'fake_vm_ref', self._dc_info,
self._ds, 'fake_files', 'password')
@mock.patch.object(ds_util, 'get_datastore')
@mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
def _test_get_spawn_vm_config_info(self,
mock_get_datacenter_ref_and_name,
mock_get_datastore,
image_size_bytes=0):
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size_bytes,
linked_clone=True)
mock_get_datastore.return_value = self._ds
mock_get_datacenter_ref_and_name.return_value = self._dc_info
vi = self._vmops._get_vm_config_info(self._instance, image_info)
self.assertEqual(image_info, vi.ii)
self.assertEqual(self._ds, vi.datastore)
self.assertEqual(self._instance.root_gb, vi.root_gb)
self.assertEqual(self._instance, vi.instance)
self.assertEqual(self._instance.uuid, vi.instance.uuid)
cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
self._ds.name, self._image_id, self._image_id)
self.assertEqual(cache_image_path, str(vi.cache_image_path))
cache_image_folder = '[%s] vmware_base/%s' % (
self._ds.name, self._image_id)
self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
def test_get_spawn_vm_config_info(self):
image_size = (self._instance.root_gb) * units.Gi / 2
self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
def test_get_spawn_vm_config_info_image_too_big(self):
image_size = (self._instance.root_gb + 1) * units.Gi
self.assertRaises(exception.InstanceUnacceptable,
self._test_get_spawn_vm_config_info,
image_size_bytes=image_size)
def test_spawn(self):
self._test_spawn()
def test_spawn_config_drive_enabled(self):
self.flags(force_config_drive=True)
self._test_spawn(config_drive=True)
def test_spawn_with_block_device_info(self):
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_with_config_drive(self):
self.flags(force_config_drive=True)
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info,
config_drive=True)
def _spawn_with_block_device_info_ephemerals(self, ephemerals):
block_device_info = {'ephemerals': ephemerals}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_ephemerals(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def test_spawn_with_block_device_info_ephemerals_no_disk_bus(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def _get_fake_vi(self):
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=7,
linked_clone=False)
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock.Mock())
return vi
@mock.patch.object(vm_util, 'create_virtual_disk')
def test_create_and_attach_ephemeral_disk(self, mock_create):
vi = self._get_fake_vi()
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
path = str(ds_obj.DatastorePath(vi.datastore.name, 'fake_uuid',
'fake-filename'))
self._vmops._create_and_attach_ephemeral_disk(self._instance,
'fake-vm-ref',
vi.dc_info, 1,
'fake-adapter-type',
path)
mock_create.assert_called_once_with(
self._session, self._dc_info.ref, 'fake-adapter-type',
'thin', path, 1)
mock_attach_disk_to_vm.assert_called_once_with(
'fake-vm-ref', self._instance, 'fake-adapter-type',
'thin', path, 1, False)
def test_create_ephemeral_with_bdi(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {'ephemerals': ephemerals}
vi = self._get_fake_vi()
with mock.patch.object(
self._vmops, '_create_and_attach_ephemeral_disk') as mock_caa:
self._vmops._create_ephemeral(block_device_info,
self._instance,
'fake-vm-ref',
vi.dc_info, vi.datastore,
'fake_uuid',
vi.ii.adapter_type)
mock_caa.assert_called_once_with(
self._instance, 'fake-vm-ref',
vi.dc_info, 1 * units.Mi, 'virtio',
'[fake_ds] fake_uuid/ephemeral_0.vmdk')
def _test_create_ephemeral_from_instance(self, bdi):
vi = self._get_fake_vi()
with mock.patch.object(
self._vmops, '_create_and_attach_ephemeral_disk') as mock_caa:
self._vmops._create_ephemeral(bdi,
self._instance,
'fake-vm-ref',
vi.dc_info, vi.datastore,
'fake_uuid',
vi.ii.adapter_type)
mock_caa.assert_called_once_with(
self._instance, 'fake-vm-ref',
vi.dc_info, 1 * units.Mi, 'lsiLogic',
'[fake_ds] fake_uuid/ephemeral_0.vmdk')
def test_create_ephemeral_with_bdi_but_no_ephemerals(self):
block_device_info = {'ephemerals': []}
self._instance.ephemeral_gb = 1
self._test_create_ephemeral_from_instance(block_device_info)
def test_create_ephemeral_with_no_bdi(self):
self._instance.ephemeral_gb = 1
self._test_create_ephemeral_from_instance(None)
def test_build_virtual_machine(self):
image_id = nova.tests.unit.image.fake.get_valid_image_id()
image = images.VMwareImage(image_id=image_id)
extra_specs = vm_util.ExtraSpecs()
vm_ref = self._vmops.build_virtual_machine(self._instance,
image, self._dc_info,
self._ds,
self.network_info,
extra_specs,
self._metadata)
vm = vmwareapi_fake._get_object(vm_ref)
# Test basic VM parameters
self.assertEqual(self._instance.uuid, vm.name)
self.assertEqual(self._instance.uuid,
vm.get('summary.config.instanceUuid'))
self.assertEqual(self._instance_values['vcpus'],
vm.get('summary.config.numCpu'))
self.assertEqual(self._instance_values['memory_mb'],
vm.get('summary.config.memorySizeMB'))
# Test NSX config
for optval in vm.get('config.extraConfig').OptionValue:
if optval.key == 'nvp.vm-uuid':
self.assertEqual(self._instance_values['uuid'], optval.value)
break
else:
self.fail('nvp.vm-uuid not found in extraConfig')
# Test that the VM is associated with the specified datastore
datastores = vm.datastore.ManagedObjectReference
self.assertEqual(1, len(datastores))
datastore = vmwareapi_fake._get_object(datastores[0])
self.assertEqual(self._ds.name, datastore.get('summary.name'))
# Test that the VM's network is configured as specified
devices = vm.get('config.hardware.device').VirtualDevice
for device in devices:
if device.obj_name != 'ns0:VirtualE1000':
continue
self.assertEqual(self._network_values['address'],
device.macAddress)
break
else:
self.fail('NIC not configured')
def test_spawn_cpu_limit(self):
cpu_limits = vm_util.CpuLimits(cpu_limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_reservation(self):
cpu_limits = vm_util.CpuLimits(cpu_reservation=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_allocations(self):
cpu_limits = vm_util.CpuLimits(cpu_limit=7,
cpu_reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_shares_level(self):
cpu_limits = vm_util.CpuLimits(cpu_shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_shares_custom(self):
cpu_limits = vm_util.CpuLimits(cpu_shares_level='custom',
cpu_shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def _validate_extra_specs(self, expected, actual):
self.assertEqual(expected.cpu_limits.cpu_limit,
actual.cpu_limits.cpu_limit)
self.assertEqual(expected.cpu_limits.cpu_reservation,
actual.cpu_limits.cpu_reservation)
self.assertEqual(expected.cpu_limits.cpu_shares_level,
actual.cpu_limits.cpu_shares_level)
self.assertEqual(expected.cpu_limits.cpu_shares_share,
actual.cpu_limits.cpu_shares_share)
def _validate_flavor_extra_specs(self, flavor_extra_specs, expected):
# Validate that the extra specs are parsed correctly
flavor = objects.Flavor(extra_specs=flavor_extra_specs)
flavor_extra_specs = self._vmops._get_extra_specs(flavor)
self._validate_extra_specs(expected, flavor_extra_specs)
def test_extra_specs_cpu_limit(self):
flavor_extra_specs = {'quota:cpu_limit': 7}
cpu_limits = vm_util.CpuLimits(cpu_limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_reservations(self):
flavor_extra_specs = {'quota:cpu_reservation': 7}
cpu_limits = vm_util.CpuLimits(cpu_reservation=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_allocations(self):
flavor_extra_specs = {'quota:cpu_limit': 7,
'quota:cpu_reservation': 6}
cpu_limits = vm_util.CpuLimits(cpu_limit=7,
cpu_reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_shares_level(self):
flavor_extra_specs = {'quota:cpu_shares_level': 'high'}
cpu_limits = vm_util.CpuLimits(cpu_shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_shares_custom(self):
flavor_extra_specs = {'quota:cpu_shares_level': 'custom',
'quota:cpu_shares_share': 1948}
cpu_limits = vm_util.CpuLimits(cpu_shares_level='custom',
cpu_shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False):
disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk
else constants.DEFAULT_DISK_TYPE)
file_type = (constants.DISK_FORMAT_ISO if is_iso
else constants.DEFAULT_DISK_FORMAT)
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
file_type=file_type,
disk_type=disk_type,
linked_clone=True)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache)
return vi
@mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
def _test_fetch_image_if_missing(self,
mock_delete_datastore_file,
mock_cache_flat_image,
mock_cache_sparse_image,
mock_cache_iso_image,
mock_prepare_flat_image,
mock_prepare_sparse_image,
mock_prepare_iso_image,
mock_fetch_image_as_file,
mock_check_cache_folder,
is_iso=False,
is_sparse_disk=False):
tmp_dir_path = mock.Mock()
tmp_image_path = mock.Mock()
if is_iso:
mock_prepare = mock_prepare_iso_image
mock_cache = mock_cache_iso_image
elif is_sparse_disk:
mock_prepare = mock_prepare_sparse_image
mock_cache = mock_cache_sparse_image
else:
mock_prepare = mock_prepare_flat_image
mock_cache = mock_cache_flat_image
mock_prepare.return_value = tmp_dir_path, tmp_image_path
vi = self._make_vm_config_info(is_iso, is_sparse_disk)
self._vmops._fetch_image_if_missing(self._context, vi)
mock_check_cache_folder.assert_called_once_with(
self._ds.name, self._ds.ref)
mock_prepare.assert_called_once_with(vi)
mock_fetch_image_as_file.assert_called_once_with(
self._context, vi, tmp_image_path)
mock_cache.assert_called_once_with(vi, tmp_image_path)
mock_delete_datastore_file.assert_called_once_with(
str(tmp_dir_path), self._dc_info.ref)
def test_fetch_image_if_missing(self):
self._test_fetch_image_if_missing()
def test_fetch_image_if_missing_with_sparse(self):
self._test_fetch_image_if_missing(
is_sparse_disk=True)
def test_fetch_image_if_missing_with_iso(self):
self._test_fetch_image_if_missing(
is_iso=True)
@mock.patch.object(images, 'fetch_image')
def test_fetch_image_as_file(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session._host,
self._session._port,
self._dc_info.name,
self._ds.name,
image_ds_loc.rel_path,
cookies='Fake-CookieJar')
@mock.patch.object(images, 'fetch_image_stream_optimized')
def test_fetch_image_as_vapp(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
image_ds_loc.parent.basename = 'fake-name'
self._vmops._fetch_image_as_vapp(self._context, vi, image_ds_loc)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session,
'fake-name',
self._ds.name,
vi.dc_info.vmFolder,
self._vmops._root_resource_pool)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_iso_image(self, mock_generate_uuid):
vi = self._make_vm_config_info(is_iso=True)
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
self._ds.name, self._image_id, self._image_id)
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_sparse_image(self, mock_generate_uuid):
vi = self._make_vm_config_info(is_sparse_disk=True)
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % (
self._ds.name, self._image_id, "tmp-sparse.vmdk")
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
@mock.patch.object(ds_util, 'mkdir')
@mock.patch.object(vm_util, 'create_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_flat_image(self,
mock_generate_uuid,
mock_delete_datastore_file,
mock_create_virtual_disk,
mock_mkdir):
vi = self._make_vm_config_info()
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
self._ds.name, self._image_id, self._image_id)
expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % (
self._ds.name, self._image_id)
expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % (
self._ds.name, self._image_id, self._image_id)
mock_mkdir.assert_called_once_with(
self._session, DsPathMatcher(expected_image_path_parent),
self._dc_info.ref)
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
image_info = vi.ii
mock_create_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
image_info.adapter_type,
image_info.disk_type,
DsPathMatcher(expected_path_to_create),
image_info.file_size_in_kb)
mock_delete_datastore_file.assert_called_once_with(
DsPathMatcher(expected_image_path),
self._dc_info.ref)
@mock.patch.object(ds_util, 'file_move')
def test_cache_iso_image(self, mock_file_move):
vi = self._make_vm_config_info(is_iso=True)
tmp_image_ds_loc = mock.Mock()
self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
mock_file_move.assert_called_once_with(
self._session, self._dc_info.ref,
tmp_image_ds_loc.parent,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
@mock.patch.object(ds_util, 'file_move')
def test_cache_flat_image(self, mock_file_move):
vi = self._make_vm_config_info()
tmp_image_ds_loc = mock.Mock()
self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
mock_file_move.assert_called_once_with(
self._session, self._dc_info.ref,
tmp_image_ds_loc.parent,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'mkdir')
def test_cache_stream_optimized_image(self, mock_mkdir, mock_disk_move):
vi = self._make_vm_config_info()
self._vmops._cache_stream_optimized_image(vi, mock.sentinel.tmp_image)
mock_mkdir.assert_called_once_with(
self._session,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id),
self._dc_info.ref)
mock_disk_move.assert_called_once_with(
self._session, self._dc_info.ref,
mock.sentinel.tmp_image,
DsPathMatcher('[fake_ds] vmware_base/%s/%s.vmdk' %
(self._image_id, self._image_id)))
@mock.patch.object(ds_util, 'file_move')
@mock.patch.object(vm_util, 'copy_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
def test_cache_sparse_image(self,
mock_delete_datastore_file,
mock_copy_virtual_disk,
mock_file_move):
vi = self._make_vm_config_info(is_sparse_disk=True)
sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
self._ds.name, self._image_id)
tmp_image_ds_loc = ds_obj.DatastorePath.parse(sparse_disk_path)
self._vmops._cache_sparse_image(vi, tmp_image_ds_loc)
target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
self._ds.name,
self._image_id, self._image_id)
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
sparse_disk_path,
DsPathMatcher(target_disk_path))
def test_get_storage_policy_none(self):
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
self.flags(pbm_enabled=True,
pbm_default_policy='fake-policy', group='vmware')
extra_specs = self._vmops._get_extra_specs(flavor)
self.assertEqual('fake-policy', extra_specs.storage_policy)
def test_get_storage_policy_extra_specs(self):
extra_specs = {'vmware:storage_policy': 'flavor-policy'}
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=extra_specs)
self.flags(pbm_enabled=True,
pbm_default_policy='default-policy', group='vmware')
extra_specs = self._vmops._get_extra_specs(flavor)
self.assertEqual('flavor-policy', extra_specs.storage_policy)
def test_get_base_folder_not_set(self):
self.flags(image_cache_subdirectory_name='vmware_base')
base_folder = self._vmops._get_base_folder()
self.assertEqual('vmware_base', base_folder)
def test_get_base_folder_host_ip(self):
self.flags(my_ip='7.7.7.7',
image_cache_subdirectory_name='_base')
base_folder = self._vmops._get_base_folder()
self.assertEqual('7.7.7.7_base', base_folder)
def test_get_base_folder_cache_prefix(self):
self.flags(cache_prefix='my_prefix', group='vmware')
self.flags(image_cache_subdirectory_name='_base')
base_folder = self._vmops._get_base_folder()
self.assertEqual('my_prefix_base', base_folder)
def _test_reboot_vm(self, reboot_type="SOFT"):
expected_methods = ['get_object_properties']
if reboot_type == "SOFT":
expected_methods.append('RebootGuest')
else:
expected_methods.append('ResetVM_Task')
query = {}
query['runtime.powerState'] = "poweredOn"
query['summary.guest.toolsStatus'] = "toolsOk"
query['summary.guest.toolsRunningStatus'] = "guestToolsRunning"
def fake_call_method(module, method, *args, **kwargs):
expected_method = expected_methods.pop(0)
self.assertEqual(expected_method, method)
if (expected_method == 'get_object_properties'):
return 'fake-props'
elif (expected_method == 'ResetVM_Task'):
return 'fake-task'
with contextlib.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(vm_util, "get_values_from_object_properties",
return_value=query),
mock.patch.object(self._session, "_call_method",
fake_call_method),
mock.patch.object(self._session, "_wait_for_task")
) as (_get_vm_ref, _get_values_from_object_properties,
fake_call_method, _wait_for_task):
self._vmops.reboot(self._instance, self.network_info, reboot_type)
_get_vm_ref.assert_called_once_with(self._session,
self._instance)
_get_values_from_object_properties.assert_called_once_with(
self._session,
'fake-props')
if reboot_type == "HARD":
_wait_for_task.assert_has_calls([
mock.call('fake-task')])
def test_reboot_vm_soft(self):
self._test_reboot_vm()
def test_reboot_vm_hard(self):
self._test_reboot_vm(reboot_type="HARD")
def test_get_instance_metadata(self):
flavor = objects.Flavor(id=7,
name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
self._instance.flavor = flavor
metadata = self._vmops._get_instance_metadata(
self._context, self._instance)
expected = ("name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
"flavor:memory_mb:6\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
"flavor:swap:33550336\n"
"imageid:70a599e0-31e7-49b7-b260-868f441e862b\n"
"package:%s\n" % version.version_string_with_package())
self.assertEqual(expected, metadata)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics utils files to compute certain similarity metrics."""
from absl import flags
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
def VerifyCompatibleImageShapes(img1, img2):
"""Checks if two image tensors are compatible for metric computation.
  This function checks that both image tensors have rank at least 3 and that
  their last three dimensions match.
Args:
img1: The first images tensor.
img2: The second images tensor.
Returns:
A tuple of the first tensor shape, the second tensor shape, and a list of
tf.Assert() implementing the checks.
Raises:
ValueError: when static shape check fails.
"""
shape1 = img1.shape.with_rank_at_least(3)
shape2 = img2.shape.with_rank_at_least(3)
if shape1.ndims is not None and shape2.ndims is not None:
for dim1, dim2 in zip(reversed(shape1[:-3]), reversed(shape2[:-3])):
# For TF V1 compatibility.
try:
dim1 = dim1.value
dim2 = dim2.value
except AttributeError:
pass
if not (dim1 in (None, 1) or dim2 in (None, 1) or dim1 == dim2):
raise ValueError('Two images are not compatible: %s and %s' %
(shape1, shape2))
else:
raise ValueError('The two images do not have a defined shape.')
# Now assign shape tensors.
shape1, shape2 = tf.shape_n([img1, img2])
checks = []
checks.append(
tf.Assert(
tf.greater_equal(tf.size(shape1), 3), [shape1, shape2], summarize=10))
checks.append(
tf.Assert(
tf.reduce_all(tf.equal(shape1[-3:], shape2[-3:])), [shape1, shape2],
summarize=10))
return shape1, shape2, checks
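# Hedged usage sketch (added for illustration, not part of the original API):
# shows how the returned asserts are typically attached to a metric via
# control dependencies. The tensor shapes below are illustrative assumptions.
def _example_verify_shapes():
  img1 = tf.constant(np.random.rand(2, 32, 32, 3), dtype=tf.float32)
  img2 = tf.constant(np.random.rand(2, 32, 32, 3), dtype=tf.float32)
  _, _, checks = VerifyCompatibleImageShapes(img1, img2)
  with tf.control_dependencies(checks):
    # Any metric computed under the checks; mean absolute error as a stand-in.
    return tf.identity(tf.reduce_mean(tf.abs(img1 - img2)))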
def _SSIMHelper(x, y, reducer, max_val, compensation=1.0):
r"""Helper function to SSIM.
Arguments:
x: first set of images.
    y: second set of images.
    reducer: Function that computes 'local' averages from a set of images. For
      the non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and
for convolutional version, this is usually tf.nn.avg_pool or tf.nn.conv2d
with weighted-sum kernel.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
compensation: Compensation factor. See above.
Returns:
A pair containing the luminance measure and the contrast-structure measure.
"""
c1 = (0.01 * max_val)**2
c2 = (0.03 * max_val)**2
# SSIM luminance measure is
# (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).
mean0 = reducer(x)
mean1 = reducer(y)
num0 = mean0 * mean1 * 2.0
den0 = tf.square(mean0) + tf.square(mean1)
luminance = (num0 + c1) / (den0 + c1)
# SSIM contrast-structure measure is
# (2 * cov_xy + c2) / (cov_xx + cov_yy + c2).
  # Note that `reducer` is a weighted sum with weights w_i, \sum_i w_i = 1; then
# cov_xy = \sum_i w_i (x_i - mu_x) (y_i - mu_y)
# = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j).
num1 = reducer(x * y) * 2.0
den1 = reducer(tf.square(x) + tf.square(y))
c2 *= compensation
cs = (num1 - num0 + c2) / (den1 - den0 + c2)
# SSIM score is the product of the luminance and contrast-structure measures.
return luminance, cs
def SSIMWithoutFilter(a,
b,
max_val=255.0,
filter_size=(8, 8),
strides=None,
spatial_average=True,
channel_average=True):
"""Computes unfiltered SSIM index between a and b per channel.
Arguments:
a: First set of patches.
b: Second set of patches.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
filter_size: Determines the moving average filter size to aggregate the SSIM
over. Must be a sequence of length two: [filter_height, filter_width].
strides: The strides of the moving average filter. Must be None or a
sequence of length two: [row_stride, col_stride]. If None, defaults to
`filter_size`.
spatial_average: If True, return the mean value across space. Otherwise,
return the full 2D spatial map.
channel_average: If True, return the mean value across channels. Otherwise,
return SSIM per channel.
Returns:
The SSIM index for each individual element in the batch.
    For color images, SSIM is averaged after being computed in each channel
    separately.
Raises:
ValueError: if a and b don't have the broadcastable shapes, or the ranks of
a and b are not at least 3.
"""
# Enforce rank and shape checks.
shape1, _, checks = VerifyCompatibleImageShapes(a, b)
with tf.control_dependencies(checks):
a = tf.identity(a)
if strides is None:
strides = filter_size
n = float(np.prod(filter_size))
kernel = tf.fill(
dims=list(filter_size) + [shape1[-1], 1],
value=tf.constant(1 / n, dtype=a.dtype))
strides = [1] + list(strides) + [1]
def reducer(x): # pylint: disable=invalid-name
shape = tf.shape(x)
# DepthwiseConv2D takes rank 4 tensors. Flatten leading dimensions.
x = tf.reshape(x, shape=tf.concat([[-1], shape[-3:]], 0))
y = tf.nn.depthwise_conv2d(x, kernel, strides=strides, padding='VALID')
return tf.reshape(y, tf.concat([shape[:-3], tf.shape(y)[1:]], 0))
compensation = (n - 1) / n
luminance, cs = _SSIMHelper(a, b, reducer, max_val, compensation)
ssim = luminance * cs
reduce_axis = [-3, -2] if spatial_average else []
if channel_average:
reduce_axis.append(-1)
if reduce_axis:
ssim = tf.reduce_mean(ssim, axis=reduce_axis)
return ssim
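# Hedged usage sketch (illustrative, not from the original module): computes
# the unfiltered SSIM for a batch of random images. The shapes, max_val and
# filter_size are assumptions chosen only for demonstration.
def _example_ssim_without_filter():
  a = tf.constant(np.random.rand(4, 64, 64, 3) * 255.0, dtype=tf.float32)
  b = tf.constant(np.random.rand(4, 64, 64, 3) * 255.0, dtype=tf.float32)
  # With the default spatial/channel averaging this returns one value per pair.
  return SSIMWithoutFilter(a, b, max_val=255.0, filter_size=(8, 8))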
def GradientDifferenceLoss(img1,
img2,
dist_func=tf.square,
reduce_func=tf.reduce_sum,
name=None):
"""Returns an op that calculates loss between image gradients.
This function assumes that `img1` and `img2` are image batches,
i.e. [batch_size, row, col, channels].
Arguments:
img1: First image batch.
img2: Second image batch.
dist_func: A TensorFlow op to apply to edge map differences (e.g. tf.square
for L2 or tf.abs for L1).
reduce_func: A TensorFlow op to reduce edge map distances into a single loss
per image pair (e.g. tf.reduce_sum for a gradient or tf.reduce_mean for a
per-pixel average score).
name: Namespace in which to embed the computation.
Returns:
A tensor with size [batch_size] containing the finite difference edge loss
for each image pair in the batch.
"""
with tf.name_scope(name, 'GDL', [img1, img2]):
_, _, checks = VerifyCompatibleImageShapes(img1, img2)
dy1, dx1 = tf.image.image_gradients(img1)
dy2, dx2 = tf.image.image_gradients(img2)
diff = dist_func(dy1 - dy2) + dist_func(dx1 - dx2)
loss = reduce_func(diff, list(range(-3, 0)))
with tf.control_dependencies(checks):
return tf.identity(loss)
def PSNR(a, b, max_val=255.0, name=None):
"""Returns the Peak Signal-to-Noise Ratio between a and b.
Arguments:
a: first set of images.
b: second set of images.
max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
name: namespace to embed the computation in.
Returns:
The scalar PSNR between a and b. The shape of the returned tensor is
[batch_size, 1].
"""
with tf.name_scope(name, 'PSNR', [a, b]):
psnr = tf.image.psnr(a, b, max_val=max_val, name=name)
_, _, checks = VerifyCompatibleImageShapes(a, b)
with tf.control_dependencies(checks):
return tf.identity(psnr)
def ClippedPSNR(img1,
img2,
min_val=0.0,
max_val=255.0,
clip=True,
quantize=True,
max_psnr=100.0,
name=None):
"""Return average Clipped PSNR between `a` and `b`.
Arguments:
img1: first set of images.
img2: second set of images.
min_val: smallest valid value for a pixel.
max_val: largest valid value for a pixel.
clip: If True, pixel values will be clipped to [`min_value`, `max_value`].
quantize: If True, pixel values will be rounded before calculating PSNR.
    max_psnr: If not None, PSNR will be clipped to this value before averaging.
name: namespace to embed the computation in.
Returns:
PSNR between img1 and img2 or average PSNR if input is a batch.
"""
with tf.name_scope(name, 'clipped_psnr', [img1, img2]):
if quantize:
img1 = tf.round(img1)
img2 = tf.round(img2)
if clip:
img1 = tf.clip_by_value(img1, min_val, max_val)
img2 = tf.clip_by_value(img2, min_val, max_val)
value_range = max_val - min_val
psnr = PSNR(img1, img2, max_val=value_range)
if max_psnr is not None:
psnr = tf.minimum(psnr, max_psnr)
return tf.reduce_mean(psnr)
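# Hedged usage sketch (illustrative assumption, not part of the original
# module): ClippedPSNR quantizes and clips pixels to the valid range first,
# so near-identical inputs saturate at max_psnr instead of diverging.
def _example_clipped_psnr():
  img = tf.constant(np.random.rand(2, 32, 32, 3) * 255.0, dtype=tf.float32)
  noisy = img + tf.constant(np.random.randn(2, 32, 32, 3), dtype=tf.float32)
  # Returns a scalar: the batch-averaged, clipped PSNR.
  return ClippedPSNR(img, noisy, min_val=0.0, max_val=255.0, max_psnr=100.0)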
def SobelEdgeLoss(img1, img2, dist_func=tf.square, reduce_func=tf.reduce_sum):
"""Returns an op that calculates Sobel edge loss between two images.
Arguments:
img1: First image batch.
img2: Second image batch.
dist_func: A TensorFlow op to apply to edge map differences (e.g. tf.square
for L2 or tf.abs for L1).
reduce_func: A TensorFlow op to reduce edge map distances into a single loss
per image pair (e.g. tf.reduce_sum for a gradient or tf.reduce_mean for a
per-pixel average score).
Returns:
A tensor with size [batch_size] containing the Sobel edge loss for each
image pair in the batch.
"""
_, _, checks = VerifyCompatibleImageShapes(img1, img2)
# Sobel tensor has shape [batch_size, h, w, d, num_kernels].
sobel1 = tf.image.sobel_edges(img1)
sobel2 = tf.image.sobel_edges(img2)
diff = dist_func(sobel1 - sobel2)
# To match GDL, sum across dy and dx regardless of reduce_func.
edge_maps = tf.reduce_sum(diff, axis=-1)
# Reduce over all dimensions except batch_size.
loss = reduce_func(edge_maps, list(range(-3, 0)))
with tf.control_dependencies(checks):
return tf.identity(loss)
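# Hedged comparison sketch (illustrative, not from the original module):
# GradientDifferenceLoss uses finite differences while SobelEdgeLoss uses
# Sobel kernels; both reduce to one loss value per image pair here.
def _example_edge_losses():
  img1 = tf.constant(np.random.rand(2, 32, 32, 1), dtype=tf.float32)
  img2 = tf.constant(np.random.rand(2, 32, 32, 1), dtype=tf.float32)
  gdl = GradientDifferenceLoss(img1, img2, dist_func=tf.abs)
  sobel = SobelEdgeLoss(img1, img2, dist_func=tf.abs)
  return gdl, sobel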
|
|
from ionotomo import *
import numpy as np
import pylab as plt
import h5py
import cmocean
import os
import logging as log
import tensorflow as tf
import gpflow as gp
#Scheduler choice is an assumption; any dask get() accepting num_workers works
from dask.threaded import get
class RationalQuadratic(gp.kernels.Stationary):
def __init__(self, input_dim=1, variance=1.0, alpha=1.0, lengthscales=None, active_dims=[0], ARD=False, alpha_prior=None):
gp.kernels.Stationary.__init__(self, input_dim, variance, lengthscales, active_dims, ARD)
self.alpha = gp.params.Parameter(alpha, transform=gp.transforms.positive,prior=alpha_prior)
def K(self, X, X2=None, presliced=False):
if not presliced:
X, X2 = self._slice(X, X2)
return self.variance * tf.pow(1 + (self.square_dist(X, X2) / (2*self.alpha*(self.lengthscales ** 2))),
-1 * self.alpha)
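#Hedged usage sketch (assumes the gpflow 1.x API used in this file): builds the
#composite kernel used later in process_ant, a RationalQuadratic plus a White
#noise term over 2-D angular inputs, with the same illustrative hyperparameters.
def _example_rq_kernel():
    k_rq = RationalQuadratic(2, variance=0.5**2, lengthscales=0.02, alpha=2.)
    k_white = gp.kernels.White(2, variance=0.)
    return k_rq + k_white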
def make_xstar(X,N=50):
"""Make a screen NxN based on obs coords X.
Returns shape (N*N, 2) array"""
xmin = np.min(X[:,0])
ymin = np.min(X[:,1])
xmax = np.max(X[:,0])
ymax = np.max(X[:,1])
xvec = np.linspace(xmin,xmax,N)
yvec = np.linspace(ymin,ymax,N)
x,y = np.meshgrid(xvec,yvec,indexing='ij')
Xstar = np.array([x.flatten(),y.flatten()]).T
return Xstar
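#Hedged usage sketch (illustrative only): make_xstar builds a regular N x N
#evaluation grid spanning the bounding box of the observed coordinates, so a
#set of observed directions yields an (N*N, 2) screen to predict onto.
def _example_make_xstar():
    X_obs = np.random.uniform(-0.05, 0.05, size=(42, 2))
    Xstar = make_xstar(X_obs, N=50)
    assert Xstar.shape == (50 * 50, 2)
    return Xstar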
def interp_nearest(x,y,z,x_,y_):
"""given input arrays (x,y,z) return nearest neighbour to (x_, y_)
Return same shape as x_"""
dx = np.subtract.outer(x_,x)
dy = np.subtract.outer(y_,y)
r = dx**2
dy *= dy
r += dy
np.sqrt(r,out=r)
arg = np.argmin(r,axis=1)
z_ = z[arg]
return z_
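#Hedged usage sketch (illustrative only): interp_nearest assigns to every query
#point the z value of its nearest observed point; this is how facet-wise phases
#are resampled onto the screen grid further down.
def _example_interp_nearest():
    x = np.array([0.0, 1.0])
    y = np.array([0.0, 1.0])
    z = np.array([10.0, 20.0])
    x_ = np.array([0.1, 0.9])
    y_ = np.array([0.1, 0.9])
    #Nearest observation to (0.1, 0.1) is (0, 0) -> 10.0; to (0.9, 0.9) -> 20.0
    return interp_nearest(x, y, z, x_, y_)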
def angular_space(dirs_uvw):
"""Go to angular space, small angles mean not much difference"""
X = np.array([np.arctan2(dirs_uvw.u.value,dirs_uvw.w.value),
np.arctan2(dirs_uvw.v.value,dirs_uvw.w.value)]).T
return X
def plot_data_posterior(x_obs, phase_obs, phase_obs_screen,uncert_obs_screen,phase_post,uncert_post,extent,plot_folder,antenna_label,timestamp):
"""Do the plotting of results"""
# extent=(np.min(X[0][:,0]),np.max(X[0][:,0]),
# np.min(X[0][:,1]),np.max(X[0][:,1]))
# phase_obs_screen = y[0].reshape((res,res))+mean[0]
# uncert_obs_screen = sigma_y[0].reshape((res,res))
# phase_post = ystar.reshape((50,50))+mean[0]
# uncert_post = np.sqrt(np.diag(cov)).reshape((50,50))
# x_obs = X_angular[0]
# phase_obs = phase[i,0,:,0]
# antenna_label = antenna_labels[i]
# timestamp = timestamps[0]
vmin = np.min(phase_obs)
vmax = np.max(phase_obs)
fig = plt.figure(figsize=(2*6,2*6))
ax = fig.add_subplot(2,2,1)
#plot first time slot for example
sc = ax.imshow(phase_obs_screen.T,origin='lower',
extent=extent,
cmap=cmocean.cm.phase,
vmin = vmin, vmax = vmax)#X[0][:,0],X[0][:,1],c=y[0],marker='+')
plt.colorbar(sc)
ax.set_title("Measured phases")
ax = fig.add_subplot(2,2,2)
sc = ax.imshow(uncert_obs_screen.T,origin='lower',
extent=extent,
cmap='bone')#X[0][:,0],X[0][:,1],c=y[0],marker='+')
plt.colorbar(sc)
plt.title("Measurement uncertainty")
ax = fig.add_subplot(2,2,3)
sc = ax.imshow(phase_post.T,origin='lower',
extent=extent,
cmap=cmocean.cm.phase,
vmin=vmin,vmax=vmax)
plt.colorbar(sc)
ax.set_title("Posterior mean")
ax.scatter(x_obs[:,0],x_obs[:,1],
c=phase_obs,
cmap=cmocean.cm.phase,
edgecolors='black',
s = 100,
vmin=vmin,vmax=vmax)
ax = fig.add_subplot(2,2,4)
sc = ax.imshow(uncert_post.T,origin='lower',
extent=extent,
cmap='bone')
plt.colorbar(sc)
ax.set_title("Posterior uncertainty")
plt.tight_layout()
plt.savefig(os.path.join(plot_folder,"measured_and_posterior_{}_{}.png".format(antenna_label,timestamp)),format='png')
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
ax.hist(uncert_post.flatten(),bins=25)
ax.set_title(r"uncert. dist: ${:.2f} \pm {:.2f}$".format(np.mean(uncert_post),np.std(uncert_post)))
plt.tight_layout()
plt.savefig(os.path.join(plot_folder,"posterior_uncert_dist{}_{}.png".format(antenna_label,timestamp)),format='png')
plt.close("all")
def main(output_folder,datapack_name,datapack_smooth_name,bayes_param_file,time_block_size = 120):
"""Main program to bayesian interpolate/smooth data.
datapack_name : str
The filename of input datapack
datapack_smooth_name : str
The filename of output smoothed datapack
bayes_param_file : str
The file that will contain the bayesian optimized regularization params
time_block_size : int
        The number of timestamps to use in the statistical correlation determination,
        i.e. the slow_gain resolution.
"""
output_folder = os.path.abspath(output_folder)
diagnostic_folder = os.path.join(output_folder,'diagnostics')
try:
os.makedirs(diagnostic_folder)
except:
pass
log.basicConfig(filename=os.path.join(output_folder,"log"),format='%(asctime)s %(levelname)s:%(message)s', level=log.DEBUG)
log.info("Using output folder {}".format(output_folder))
#bayes_params = h5py.File(os.path.join(output_folder,bayes_param_file),'a')
datapack = DataPack(filename=datapack_name)
datapack_smooth = datapack.clone()
times_all,timestamps_all = datapack.get_times(time_idx=-1)
Na_all = len(times_all)
time_block_idx = 0
while time_block_idx*time_block_size < Na_all:
start_time = time_block_idx * time_block_size
stop_time = min(Na_all,(time_block_idx+1) * time_block_size)
time_idx = range(start_time,stop_time)
log.info("Processing time block {}: {} to {}".format(time_block_idx,timestamps_all[start_time],timestamps_all[stop_time]))
#Will smooth at all antennas, all directions, all freq
ant_idx = -1
dir_idx = -1
freq_idx = -1
#derived from slow_gains
std = np.sqrt(datapack.get_variance(ant_idx=ant_idx, time_idx=time_idx, dir_idx=dir_idx, freq_idx=freq_idx))
#phase from dd and di solutions combined, we phase wrap here
#TODO 2D+time phase unwrap
phase = np.angle(np.exp(1j*datapack.get_phase(ant_idx=ant_idx, time_idx=time_idx, dir_idx=dir_idx, freq_idx=freq_idx)))
directions, patch_names = datapack.get_directions(dir_idx=dir_idx)
antennas, antenna_labels = datapack.get_antennas(ant_idx=ant_idx)
times,timestamps = datapack.get_times(time_idx=time_idx)
freqs = datapack.get_freqs(freq_idx=freq_idx)
Na = len(antennas)
Nt = len(times)
Nd = len(directions)
Nf = len(freqs)
#to phase unwrap in time axis uncomment
# from rathings.phase_unwrap import phase_unwrapp1d
# phase = np.transpose(phase_unwrapp1d(np.transpose(phase,axes=(1,0,2,3)),axis=0),axes=(1,0,2,3))
#define directions with Pointing (UVW fixed to first time but obstime actual)
fixtime = times[0]
fixfreq = freqs[Nf>>1]
phase_center = datapack.get_center_direction()
array_center = datapack.radio_array.get_center()
uvw = [Pointing(location = array_center.earth_location,obstime = times[j],fixtime=fixtime, phase = phase_center) for j in range(Nt)]
ants_uvw = [antennas.transform_to(uvw[j]) for j in range(Nt)]
dirs_uvw = [directions.transform_to(uvw[j]) for j in range(Nt)]
#Make coords in angular per time
X_angular = np.array([angular_space(dirs_uvw[j]) for j in range(Nt)])
def process_ant(i,antenna_labels,X_angular,Na,Nt,Nd,Nf,phase,std,time_block_idx,timestamps,diagnostic_folder,bayes_params_file):
"""Python call to process an antenna.
            This is dispatched through the dask graph below to automate parallelization.
i : int
the antenna index to process
Note: requires all global variables to be around.
Note: This is stateless, so repeated calls produce the same output.
This enables faster computation.
"""
#make stateless
np.random.seed(i)
#import logging as log
X_angular = list(X_angular)
log.info("working on {}".format(antenna_labels[i]))
res = 12
sample_n = 144
            #We solve on "faceted solution coords" because it doesn't make sense to
            #centralize the solutions at the facet centroid, given how calibration is
            #inherently done for the full facet.
X_nearest = [make_xstar(X_angular[j],res) for j in range(Nt)]
X = X_nearest
mask = [np.random.choice(res*res,size=res*res - sample_n,replace=False) for j in range(Nt)]
###
# Real Part
###
real = np.cos(phase[:,:,:])
###
# Imag Part
###
imag = np.sin(phase[:,:,:])
###
# std
###
std_real = np.abs(imag) * std[:,:,:]
std_imag = np.abs(real) * std[:,:,:]
y_real = [interp_nearest(X_angular[j][:,0],X_angular[j][:,1],
real[j,:,0], X_nearest[j][:,0], X_nearest[j][:,1]) for j in range(Nt)]
y_imag = [interp_nearest(X_angular[j][:,0],X_angular[j][:,1],
imag[j,:,0], X_nearest[j][:,0], X_nearest[j][:,1]) for j in range(Nt)]
sigma_y_real = [interp_nearest(X_angular[j][:,0],X_angular[j][:,1],
std_real[j,:,0], X_nearest[j][:,0], X_nearest[j][:,1]) for j in range(Nt)]
sigma_y_imag = [interp_nearest(X_angular[j][:,0],X_angular[j][:,1],
std_imag[j,:,0], X_nearest[j][:,0], X_nearest[j][:,1]) for j in range(Nt)]
y_obs = np.angle(y_real[0] + 1j*(y_imag[0] )).reshape((res,res))
std_obs = np.sqrt((y_real[0] )**2 * sigma_y_real[0]**2 + (y_imag[0] )**2 * sigma_y_imag[0]**2).reshape((res,res))
for j in range(Nt):
y_real[j][mask[j]] = np.nan
y_imag[j][mask[j]] = np.nan
sigma_y_real[j][mask[j]] = np.nan
sigma_y_imag[j][mask[j]] = np.nan
#sample the non-masked
for j in range(Nt):
X[j] = X[j][np.bitwise_not(np.isnan(y_real[j])),:]
sigma_y_real[j] = sigma_y_real[j][np.bitwise_not(np.isnan(y_real[j]))]
sigma_y_imag[j] = sigma_y_imag[j][np.bitwise_not(np.isnan(y_real[j]))]
y_real[j] = y_real[j][np.bitwise_not(np.isnan(y_real[j]))]
y_imag[j] = y_imag[j][np.bitwise_not(np.isnan(y_real[j]))]
            #Append the time-slot index as an extra column; use j so the antenna index i is not shadowed
            X = np.concatenate([np.concatenate([x,j*np.ones((x.shape[0],1))],axis=1) for j,x in enumerate(X)],axis=0)
            y_real = np.concatenate([np.stack([y,j*np.ones(y.shape[0])],axis=1) for j,y in enumerate(y_real)],axis=0)
            sigma_y_real = np.concatenate([np.stack([y,j*np.ones(y.shape[0])],axis=1) for j,y in enumerate(sigma_y_real)],axis=0)
            y_imag = np.concatenate([np.stack([y,j*np.ones(y.shape[0])],axis=1) for j,y in enumerate(y_imag)],axis=0)
            sigma_y_imag = np.concatenate([np.stack([y,j*np.ones(y.shape[0])],axis=1) for j,y in enumerate(sigma_y_imag)],axis=0)
#Define GP kernel
mean_real = gp.mean_functions.SwitchedMeanFunction([gp.mean_functions.Linear() for j in range(Nt)])
mean_imag = gp.mean_functions.SwitchedMeanFunction([gp.mean_functions.Linear() for j in range(Nt)])
K1_real = RationalQuadratic(2,variance=0.5**2,lengthscales=0.02,alpha=2.)
K2_real = gp.kernels.White(2,variance=0.)
K_real = K1_real+K2_real
coreg = gp.kernels.Coregion(2, output_dim=2, rank=1, active_dims=[1])
coreg.W.set_trainable(False)
kern_real = K_real* coreg
m_real = gp.models.GPR(X, y_real,
kern=kern_real, mean_function=mean_real)
likelihood = gp.likelihoods.Gaussian(sigma_y_real[:,None]**2)
likelihood.variance.set_trainable(False)
            #Re-initialize the model with a fixed heteroscedastic Gaussian likelihood
            gp.models.GPModel.__init__(m_real,gp.DataHolder(X),gp.DataHolder(y_real),
                    kern_real,likelihood,mean_real)
o = gp.train.ScipyOptimizer(method='BFGS')
log.info("Level 2 Solve...Real")
o.minimize(m_real,maxiter=1000)
lml_real = m_real.compute_log_likelihood()
log.info(m_real)
K1_imag = RationalQuadratic(2,variance=0.5**2,lengthscales=0.02,alpha=2.)
K2_imag = gp.kernels.White(2,variance=0.)
K_imag = K1_imag+K2_imag
kern_imag = K_imag* coreg
m_imag = gp.models.GPR(X, y_imag,
kern=kern_imag, mean_function=mean_imag)
likelihood = gp.likelihoods.Gaussian(sigma_y_imag[:,None]**2)
likelihood.variance.set_trainable(False)
            gp.models.GPModel.__init__(m_imag,gp.DataHolder(X),gp.DataHolder(y_imag),
                    kern_imag,likelihood,mean_imag)
o = gp.train.ScipyOptimizer(method='BFGS')
log.info("Level 2 Solve...Imag")
o.minimize(m_imag,maxiter=1000)
lml_imag = m_imag.compute_log_likelihood()
log.info(m_imag)
Xstar = make_xstar(X_nearest[0],N=50)
            #Predict the first time slot (index column of zeros) for the diagnostic plot
            ystar_real,varstar_real = m_real.predict_f(np.concatenate([Xstar,np.zeros((Xstar.shape[0],1))],axis=1))
            ystar_imag,varstar_imag = m_imag.predict_f(np.concatenate([Xstar,np.zeros((Xstar.shape[0],1))],axis=1))
log.info("Hamiltonian (real): {}".format( -lml_real))
log.info("Hamiltonian (imag): {}".format( -lml_imag))
            ystar = np.angle(ystar_real + 1j*ystar_imag).reshape((50,50))
            stdstar = np.sqrt(ystar_real**2 * varstar_real + ystar_imag**2 * varstar_imag).reshape((50,50))
# try:
# with h5py.File(bayes_params_file,'r') as bayes_params:
# K_real.hyperparams = bayes_params['/{}/{}/real'.format(antenna_labels[i],time_block_idx)]
# log.info("Loaded bayes params /{}/{}/real".format(antenna_labels[i],time_block_idx))
# except:
#
# K_real.hyperparams = gp.level2_multidataset_solve(X,y_real,sigma_y_real,K_real,n_random_start=0)
# with h5py.File(bayes_params_file,'a') as bayes_params:
# bayes_params['/{}/{}/real'.format(antenna_labels[i],time_block_idx)] = K_real.hyperparams
# bayes_params.flush()
phase_smooth = np.zeros([1,Nt,Nd,Nf])
variance_smooth = np.zeros([1,Nt,Nd,Nf])
log.info("Smoothing time_block...")
for j in range(Nt):
for l in range(Nf):
                    #The original level1_solve calls targeted an older custom gp module;
                    #with gpflow we predict from the trained coregionalized models instead.
                    Xstar_j = np.concatenate([X_angular[j],j*np.ones((X_angular[j].shape[0],1))],axis=1)
                    ystar_real,varstar_real = m_real.predict_f(Xstar_j)
                    ystar_imag,varstar_imag = m_imag.predict_f(Xstar_j)
                    phase_smooth[0,j,:,l] = np.angle(ystar_real + 1j*ystar_imag).flatten()
                    variance_smooth[0,j,:,l] = (varstar_real * ystar_real**2 +
                                                varstar_imag * ystar_imag**2).flatten()
return [phase_smooth, variance_smooth, X_angular[0], y_obs, std_obs, ystar, stdstar]
## #center on zero for GP solve without basis
## mean = [np.mean(phase[i,j,:,0]) for j in range(Nt)]
## y = [interp_nearest(X_angular[j][:,0],X_angular[j][:,1],
## phase[i,j,:,0]-mean[j], X_nearest[j][:,0], X_nearest[j][:,1]) for j in range(Nt)]
##
## sigma_y = [interp_nearest(X_angular[j][:,0],X_angular[j][:,1],
## std[i,j,:,0], X_nearest[j][:,0], X_nearest[j][:,1]) for j in range(Nt)]
## for j in range(Nt):
## y[j][mask[j]] = np.nan
## sigma_y[j][mask[j]] = np.nan
##
##
## #sample the non-masked
## for j in range(Nt):
## X[j] = X[j][np.bitwise_not(np.isnan(y[j])),:]
## sigma_y[j] = sigma_y[j][np.bitwise_not(np.isnan(y[j]))]
## y[j] = y[j][np.bitwise_not(np.isnan(y[j]))]
##
## #Define GP kernel
## K1 = gp.RationalQuadratic(2,l=0.02,sigma=0.52, alpha=2.)
## K1.set_hyperparams_bounds([0.005,0.10],name='l')
## K1.set_hyperparams_bounds([0.005,4.],name='sigma')
## K1.set_hyperparams_bounds([0.05,100.],name='alpha')
## K2 = gp.Diagonal(2,sigma=0.01)
## K2.set_hyperparams_bounds([0.00,0.20],name='sigma')
## K = K1+K2
## try:
## K.hyperparams = bayes_params['/{}/{}/real'.format(antenna_labels[i],time_block_idx)]
## log.info("Loaded bayes params /{}/{}/real".format(antenna_labels[i],time_block_idx))
## except:
## log.info("Level 2 Solve...")
## K.hyperparams = gp.level2_multidataset_solve(X,y,sigma_y,K,n_random_start=1)
## bayes_params['/{}/{}/real'.format(antenna_labels[i],time_block_idx)] = K.hyperparams
## bayes_params.flush()
## log.info(K)
##
## #plot first timestamp
## Xstar = make_xstar(X[0],N=50)
## ystar,cov,lml = gp.level1_solve(X_angular[0],phase[i,0,:,0]-mean[0],std[i,0,:,0],Xstar,K)
## log.info("Hamiltonian: {}".format( -lml))
## phase_smooth = np.zeros([1,Nt,Nd,Nf])
## variance_smooth = np.zeros([1,Nt,Nd,Nf])
## log.info("Smoothing time_block...")
## for j in range(Nt):
## for l in range(Nf):
## mean = np.mean(phase[i,j,:,l])
## Xstar=X_angular[j]
## ystar,cov,lml = gp.level1_solve(X_angular[j],phase[i,j,:,l]-mean,std[i,j,:,l],Xstar,K)
## phase_smooth[0,j,:,l] = ystar + mean
## variance_smooth[0,j,:,l] = np.diag(cov)
## return [phase_smooth, variance_smooth]
# log.info("Building TF graph")
# g = tf.Graph()
# sess = tf.InteractiveSession(graph=g,config=tf.ConfigProto(operation_timeout_in_ms=2000, inter_op_parallelism_threads=2, intra_op_parallelism_threads=1))
# with g.as_default():
# smooth_ = []
#
#
# for i in range(Na):
# args = [tf.constant(phase[i,:,:,:]),
# tf.constant(std[i,:,:,:])]
# smooth_.append(tf.py_func(lambda phase,std : process_ant(i,antenna_labels,X_angular,Na,Nt,Nd,Nf,phase,std,time_block_idx,timestamps,diagnostic_folder,os.path.join(output_folder,bayes_param_file))
# ,args,[tf.float64,tf.float64,tf.float64,tf.float64,tf.float64,tf.float64,tf.float64],stateful=False))
#
# log.info("Running graph")
# res = sess.run(smooth_)
#
# sess.close()
log.info("Building dask graph")
dsk = {}
dsk['antenna_labels'] = antenna_labels
dsk['X_angular'] = X_angular
dsk['Na'] = Na
dsk['Nt'] = Nt
dsk['Nd'] = Nd
dsk['Nf'] = Nf
dsk['time_block_idx'] = time_block_idx
dsk['timestamps'] = timestamps
dsk['diagnostic_folder'] = diagnostic_folder
dsk['bayes_param_file'] = os.path.join(output_folder,bayes_param_file)
#res = []
smooth_ = []
for i in range(Na):
            #Use per-antenna keys, otherwise every task would reference the last antenna's data
            dsk['phase_{}'.format(i)] = phase[i,:,:,:]
            dsk['std_{}'.format(i)] = std[i,:,:,:]
            #res.append(process_ant(i,antenna_labels,X_angular,Na,Nt,Nd,Nf,phase,std,time_block_idx,timestamps,diagnostic_folder,bayes_params))
            dsk[antenna_labels[i]] = (process_ant,i,'antenna_labels','X_angular','Na','Nt','Nd','Nf','phase_{}'.format(i),'std_{}'.format(i),'time_block_idx','timestamps','diagnostic_folder','bayes_param_file')
smooth_.append(antenna_labels[i])
log.info("Running graph")
res = get(dsk,smooth_,num_workers=2)
log.info("Storing results in datapack_smooth")
for i in range(Na):
phase_smooth, variance_smooth, x_obs, y_obs, std_obs, ystar, stdstar = res[i]
extent = (np.min(x_obs[:,0]), np.max(x_obs[:,0]), np.min(x_obs[:,1]), np.max(x_obs[:,1]))
plot_data_posterior(x_obs, phase[i,0,:,0], y_obs, std_obs, ystar, stdstar, extent, diagnostic_folder, antenna_labels[i], timestamps[0])
datapack_smooth.set_phase(phase_smooth,ant_idx=[i],time_idx=time_idx,dir_idx=dir_idx,freq_idx=freq_idx)
datapack_smooth.set_variance(variance_smooth,ant_idx=[i],time_idx=time_idx,dir_idx=dir_idx,freq_idx=freq_idx)
log.info("Saving {}".format(datapack_smooth_name))
datapack_smooth.save(datapack_smooth_name)
time_block_idx += 1
#bayes_params.close()
if __name__=='__main__':
main("output_complex","rvw_datapack_full_phase.hdf5","rvw_datapack_full_phase_smooth_complex.hdf5","bayes_parameters_complex.hdf5",120)
|
|
import uuid
from django.test import SimpleTestCase, TestCase
from django.test.utils import override_settings
from casexml.apps.case.mock import CaseFactory, CaseStructure, CaseIndex
from casexml.apps.case.const import CASE_INDEX_EXTENSION, UNOWNED_EXTENSION_OWNER_ID
from casexml.apps.case.tests.util import delete_all_cases
from casexml.apps.phone.cleanliness import set_cleanliness_flags, hint_still_valid, \
get_cleanliness_flag_from_scratch, get_case_footprint_info, get_dependent_case_info
from casexml.apps.phone.data_providers.case.clean_owners import pop_ids
from casexml.apps.phone.exceptions import InvalidDomainError, InvalidOwnerIdError
from casexml.apps.phone.models import OwnershipCleanlinessFlag
from casexml.apps.phone.tests.test_sync_mode import SyncBaseTest
from corehq.form_processor.tests.utils import run_with_all_backends
@override_settings(TESTS_SHOULD_TRACK_CLEANLINESS=None)
class OwnerCleanlinessTest(SyncBaseTest):
def setUp(self):
super(OwnerCleanlinessTest, self).setUp()
self.owner_id = uuid.uuid4().hex
self.synclog_id = uuid.uuid4().hex
self.domain = uuid.uuid4().hex
self.factory = CaseFactory(
domain=self.domain,
case_defaults={
'create': True,
'owner_id': self.owner_id,
'user_id': self.owner_id,
}
)
self.assert_owner_clean() # this first call creates the OwnershipCleanliness doc
self.sample_case = self.factory.create_case()
self.child, self.parent = self.factory.create_or_update_case(
CaseStructure(
indices=[
CaseIndex(),
]
)
)
self.extension, self.host = self.factory.create_or_update_case(
CaseStructure(
attrs={'owner_id': UNOWNED_EXTENSION_OWNER_ID},
indices=[
CaseIndex(
relationship=CASE_INDEX_EXTENSION
),
]
)
)
self.assert_owner_clean() # this is an actual assertion
def _verify_set_cleanliness_flags(self, owner_id=None):
"""
Can be run at the end of any relevant test to check the current state of the
OwnershipCleanliness object and verify that rebuilding it from scratch produces
the same result
"""
if owner_id is None:
owner_id = self.owner_id
owner_cleanliness = self._owner_cleanliness_for_id(owner_id)
is_clean = owner_cleanliness.is_clean
hint = owner_cleanliness.hint
owner_cleanliness.delete()
set_cleanliness_flags(self.domain, owner_id, force_full=True)
new_cleanliness = OwnershipCleanlinessFlag.objects.get(owner_id=owner_id)
self.assertEqual(is_clean, new_cleanliness.is_clean)
self.assertEqual(hint, new_cleanliness.hint)
if hint:
self.assertTrue(hint_still_valid(self.domain, hint))
@property
def owner_cleanliness(self):
return self._owner_cleanliness_for_id(self.owner_id)
def _owner_cleanliness_for_id(self, owner_id):
return OwnershipCleanlinessFlag.objects.get_or_create(
owner_id=owner_id,
domain=self.domain,
defaults={'is_clean': True}
)[0]
def assert_owner_clean(self):
self.assertTrue(self.owner_cleanliness.is_clean)
def assert_owner_dirty(self):
self.assertFalse(self.owner_cleanliness.is_clean)
def assert_owner_temporarily_dirty(self):
"""
Changing any case's owner makes the previous owner ID temporarily dirty, to allow
        syncs to happen, but it should become clean on a rebuild.
This checks that workflow and rebuilds the cleanliness flag.
"""
self.assertFalse(self.owner_cleanliness.is_clean)
set_cleanliness_flags(self.domain, self.owner_id, force_full=True)
self.assertTrue(self.owner_cleanliness.is_clean)
def _set_owner(self, case_id, owner_id):
case = self.factory.create_or_update_case(
CaseStructure(case_id=case_id, attrs={'create': False, 'owner_id': owner_id})
)[0]
self.assertEqual(owner_id, case.owner_id)
@run_with_all_backends
def test_add_normal_case_stays_clean(self):
"""Owned case with no indices remains clean"""
self.factory.create_case()
self.assert_owner_clean()
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_change_owner_stays_clean(self):
"""change the owner ID of a normal case, should remain clean"""
new_owner = uuid.uuid4().hex
self._set_owner(self.sample_case.case_id, new_owner)
self.assert_owner_temporarily_dirty()
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_change_owner_child_case_stays_clean(self):
"""change the owner ID of a child case, should remain clean"""
new_owner = uuid.uuid4().hex
self._set_owner(self.child.case_id, new_owner)
self.assert_owner_temporarily_dirty()
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_add_clean_parent_stays_clean(self):
"""add a parent with the same owner, should remain clean"""
self.factory.create_or_update_case(CaseStructure(indices=[CaseIndex()]))
self.assert_owner_clean()
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_create_dirty_makes_dirty(self):
"""create a case and a parent case with a different owner at the same time
        and make sure the owner becomes dirty.
"""
new_owner = uuid.uuid4().hex
[child, parent] = self.factory.create_or_update_case(
CaseStructure(
indices=[
CaseIndex(CaseStructure(attrs={'owner_id': new_owner}))
]
)
)
self.assert_owner_dirty()
self.assertEqual(child.case_id, self.owner_cleanliness.hint)
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_add_dirty_parent_makes_dirty(self):
"""add parent with a different owner and make sure the owner becomes dirty"""
new_owner = uuid.uuid4().hex
[child, parent] = self.factory.create_or_update_case(
CaseStructure(
case_id=self.sample_case.case_id,
indices=[
CaseIndex(CaseStructure(attrs={'owner_id': new_owner}))
]
)
)
self.assert_owner_dirty()
self.assertEqual(child.case_id, self.owner_cleanliness.hint)
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_change_parent_owner_makes_dirty(self):
"""change the owner id of a parent case and make sure the owner becomes dirty"""
new_owner = uuid.uuid4().hex
self._set_owner(self.parent.case_id, new_owner)
self.assert_owner_dirty()
self.assertEqual(self.child.case_id, self.owner_cleanliness.hint)
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_change_host_owner_remains_clean(self):
"""change owner for unowned extension, owner remains clean"""
new_owner = uuid.uuid4().hex
self._owner_cleanliness_for_id(new_owner)
self._set_owner(self.host.case_id, new_owner)
self.assert_owner_temporarily_dirty()
self.assertTrue(self._owner_cleanliness_for_id(new_owner).is_clean)
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_change_host_owner_makes_both_owners_dirty(self):
"""change owner for extension, both owners dirty"""
new_owner = uuid.uuid4().hex
self._owner_cleanliness_for_id(new_owner)
self._set_owner(self.extension.case_id, new_owner)
self.assert_owner_dirty()
self.assertFalse(self._owner_cleanliness_for_id(new_owner).is_clean)
@run_with_all_backends
def test_set_flag_clean_no_data(self):
unused_owner_id = uuid.uuid4().hex
set_cleanliness_flags(self.domain, unused_owner_id)
self.assertTrue(OwnershipCleanlinessFlag.objects.get(owner_id=unused_owner_id).is_clean)
@run_with_all_backends
def test_hint_invalidation(self):
new_owner = uuid.uuid4().hex
self._set_owner(self.parent.case_id, new_owner)
self._set_owner(self.parent.case_id, self.owner_id)
# after the submission the dirtiness flag should still be set
# since it isn't invalidated right away
self.assert_owner_dirty()
# explicitly make sure the hint is no longer valid
self.assertFalse(hint_still_valid(self.domain, self.owner_cleanliness.hint))
# reset the cleanliness flag and ensure it worked
set_cleanliness_flags(self.domain, self.owner_id)
self.assert_owner_clean()
self.assertEqual(None, self.owner_cleanliness.hint)
@run_with_all_backends
def test_hint_invalidation_extensions(self):
other_owner_id = uuid.uuid4().hex
[extension, host] = self.factory.create_or_update_case(
CaseStructure(
case_id='extension',
attrs={'owner_id': other_owner_id},
indices=[
CaseIndex(
CaseStructure(case_id="host"),
relationship=CASE_INDEX_EXTENSION
)
]
)
)
self.assert_owner_dirty()
self.assertTrue(hint_still_valid(self.domain, self.owner_cleanliness.hint))
self._set_owner(extension.case_id, UNOWNED_EXTENSION_OWNER_ID)
self.assertFalse(hint_still_valid(self.domain, self.owner_cleanliness.hint))
@run_with_all_backends
def test_hint_invalidation_extension_chain(self):
other_owner_id = uuid.uuid4().hex
self._owner_cleanliness_for_id(other_owner_id)
host = CaseStructure(case_id=self.sample_case.case_id, attrs={'create': False})
extension_1 = CaseStructure(
case_id="extension1",
attrs={'owner_id': UNOWNED_EXTENSION_OWNER_ID},
indices=[
CaseIndex(
host,
relationship=CASE_INDEX_EXTENSION,
)
]
)
extension_2 = CaseStructure(
case_id="extension2",
attrs={'owner_id': other_owner_id},
indices=[
CaseIndex(
extension_1,
relationship=CASE_INDEX_EXTENSION,
)
]
)
self.factory.create_or_update_case(extension_2)
self.assert_owner_dirty()
self._set_owner(extension_2.case_id, UNOWNED_EXTENSION_OWNER_ID)
self.assertFalse(hint_still_valid(self.domain, self.owner_cleanliness.hint))
@run_with_all_backends
def test_cross_domain_on_submission(self):
"""create a form that makes a dirty owner with the same ID but in a different domain
        and make sure the original owner stays clean"""
new_domain = uuid.uuid4().hex
# initialize the new cleanliness flag
OwnershipCleanlinessFlag.objects.create(domain=new_domain, owner_id=self.owner_id, is_clean=True)
self.factory.domain = new_domain
self.factory.create_or_update_case(
CaseStructure(
indices=[
CaseIndex(CaseStructure(attrs={'owner_id': uuid.uuid4().hex}))
]
)
)
self.assert_owner_clean()
self.assertEqual(
False,
OwnershipCleanlinessFlag.objects.get(owner_id=self.owner_id, domain=new_domain).is_clean,
)
@run_with_all_backends
def test_cross_domain_both_clean(self):
new_domain = uuid.uuid4().hex
self.factory.domain = new_domain
self.factory.create_or_update_case(
CaseStructure(
indices=[
CaseIndex(),
]
)
)
# two clean ownership models in different domains should report clean
self.assertTrue(get_cleanliness_flag_from_scratch(self.domain, self.owner_id).is_clean)
self.assertTrue(get_cleanliness_flag_from_scratch(new_domain, self.owner_id).is_clean)
@run_with_all_backends
def test_cross_domain_dirty(self):
new_domain = uuid.uuid4().hex
new_owner = uuid.uuid4().hex
self.factory.domain = new_domain
self.factory.create_or_update_case(
CaseStructure(
indices=[
CaseIndex(CaseStructure(attrs={'owner_id': new_owner})),
]
)
)
# original domain should stay clean but the new one should be dirty
self.assertTrue(get_cleanliness_flag_from_scratch(self.domain, self.owner_id).is_clean)
self.assertFalse(get_cleanliness_flag_from_scratch(new_domain, self.owner_id).is_clean)
@run_with_all_backends
def test_non_existent_parent(self):
self.factory.create_or_update_case(
CaseStructure(
indices=[
CaseIndex(CaseStructure()),
],
walk_related=False,
)
)
self.assertTrue(get_cleanliness_flag_from_scratch(self.domain, self.owner_id).is_clean)
@override_settings(TESTS_SHOULD_TRACK_CLEANLINESS=False)
@run_with_all_backends
def test_autocreate_flag_off(self):
new_owner = uuid.uuid4().hex
self.factory.create_or_update_case(
CaseStructure(case_id=uuid.uuid4().hex, attrs={'create': True, 'owner_id': new_owner})
)[0]
self.assertFalse(OwnershipCleanlinessFlag.objects.filter(domain=self.domain, owner_id=new_owner).exists())
@override_settings(TESTS_SHOULD_TRACK_CLEANLINESS=True)
@run_with_all_backends
def test_autocreate_flag_on(self):
new_owner = uuid.uuid4().hex
self.factory.create_or_update_case(
CaseStructure(case_id=uuid.uuid4().hex, attrs={'create': True, 'owner_id': new_owner})
)[0]
flag = OwnershipCleanlinessFlag.objects.get(domain=self.domain, owner_id=new_owner)
self.assertEqual(True, flag.is_clean)
@run_with_all_backends
def test_simple_unowned_extension(self):
"""Simple unowned extensions should be clean"""
self.factory.create_or_update_case(
CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': UNOWNED_EXTENSION_OWNER_ID},
indices=[
CaseIndex(
CaseStructure(),
relationship=CASE_INDEX_EXTENSION
)
]
)
)
self.assert_owner_clean()
self._verify_set_cleanliness_flags()
@run_with_all_backends
def test_owned_extension(self):
"""Extension owned by another owner should be dirty"""
other_owner_id = uuid.uuid4().hex
self._owner_cleanliness_for_id(other_owner_id)
[extension, host] = self.factory.create_or_update_case(
CaseStructure(
case_id='extension',
attrs={'owner_id': other_owner_id},
indices=[
CaseIndex(
CaseStructure(case_id="host"),
relationship=CASE_INDEX_EXTENSION
)
]
)
)
self.assert_owner_dirty()
self.assertFalse(self._owner_cleanliness_for_id(other_owner_id).is_clean)
self.assertEqual(host.case_id, self.owner_cleanliness.hint)
self.assertEqual(extension.case_id, self._owner_cleanliness_for_id(other_owner_id).hint)
self._verify_set_cleanliness_flags(self.owner_id)
self._verify_set_cleanliness_flags(other_owner_id)
@run_with_all_backends
def test_extension_chain_with_other_owner_makes_dirty(self):
"""An extension chain of unowned extensions that ends at a case owned by a different owner is dirty"""
other_owner_id = uuid.uuid4().hex
self._owner_cleanliness_for_id(other_owner_id)
host = CaseStructure(case_id=self.sample_case.case_id, attrs={'create': False})
extension_1 = CaseStructure(
case_id="extension1",
attrs={'owner_id': UNOWNED_EXTENSION_OWNER_ID},
indices=[
CaseIndex(
host,
relationship=CASE_INDEX_EXTENSION,
)
]
)
extension_2 = CaseStructure(
case_id="extension2",
attrs={'owner_id': other_owner_id},
indices=[
CaseIndex(
extension_1,
relationship=CASE_INDEX_EXTENSION,
)
]
)
self.factory.create_or_update_case(extension_2)
self.assert_owner_dirty()
self.assertFalse(self._owner_cleanliness_for_id(other_owner_id).is_clean)
self.assertEqual(host.case_id, self.owner_cleanliness.hint)
self.assertEqual(extension_2.case_id, self._owner_cleanliness_for_id(other_owner_id).hint)
self._verify_set_cleanliness_flags(self.owner_id)
self._verify_set_cleanliness_flags(other_owner_id)
@run_with_all_backends
def test_multiple_indices_multiple_owners(self):
"""Extension that indexes a case with another owner should make all owners dirty"""
other_owner_id = uuid.uuid4().hex
self._owner_cleanliness_for_id(other_owner_id)
host_1 = CaseStructure()
host_2 = CaseStructure(attrs={'owner_id': other_owner_id})
self.factory.create_or_update_case(
CaseStructure(
case_id=self.sample_case.case_id,
attrs={'owner_id': UNOWNED_EXTENSION_OWNER_ID},
indices=[
CaseIndex(
host_1,
relationship=CASE_INDEX_EXTENSION,
identifier="host_1",
),
CaseIndex(
host_2,
relationship=CASE_INDEX_EXTENSION,
identifier="host_2",
)
]
)
)
self.assert_owner_dirty()
self.assertFalse(self._owner_cleanliness_for_id(other_owner_id).is_clean)
self.assertEqual(host_1.case_id, self.owner_cleanliness.hint)
self.assertEqual(host_2.case_id, self._owner_cleanliness_for_id(other_owner_id).hint)
self._verify_set_cleanliness_flags(self.owner_id)
self._verify_set_cleanliness_flags(other_owner_id)
@run_with_all_backends
def test_long_extension_chain_with_branches(self):
"""An extension chain of unowned extensions that ends at an owned case is dirty"""
owner_1 = uuid.uuid4().hex
self._owner_cleanliness_for_id(owner_1)
owner_2 = uuid.uuid4().hex
self._owner_cleanliness_for_id(owner_2)
host = CaseStructure(case_id=self.sample_case.case_id, attrs={'create': False})
host_2 = CaseStructure(
case_id="host_with_other_owner",
attrs={'owner_id': owner_1}
)
extension_1 = CaseStructure(
case_id="extension1",
attrs={'owner_id': UNOWNED_EXTENSION_OWNER_ID},
indices=[
CaseIndex(
host,
relationship=CASE_INDEX_EXTENSION,
identifier="host_1",
),
CaseIndex(
host_2,
relationship=CASE_INDEX_EXTENSION,
identifier="host_2",
)
]
)
extension_2 = CaseStructure(
case_id="extension2",
attrs={'owner_id': owner_2},
indices=[
CaseIndex(
extension_1,
relationship=CASE_INDEX_EXTENSION,
)
]
)
self.factory.create_or_update_case(extension_2)
self.assert_owner_dirty()
self.assertFalse(self._owner_cleanliness_for_id(owner_1).is_clean)
self.assertFalse(self._owner_cleanliness_for_id(owner_2).is_clean)
self.assertEqual(host.case_id, self.owner_cleanliness.hint)
self._verify_set_cleanliness_flags()
class SetCleanlinessFlagsTest(TestCase):
@run_with_all_backends
def test_set_bad_domains(self):
test_cases = [None, '', 'something-too-long' * 10]
for invalid_domain in test_cases:
with self.assertRaises(InvalidDomainError):
set_cleanliness_flags(invalid_domain, 'whatever')
@run_with_all_backends
def test_set_bad_owner_ids(self):
test_cases = [None, '', 'something-too-long' * 10]
for invalid_owner in test_cases:
with self.assertRaises(InvalidOwnerIdError):
set_cleanliness_flags('whatever', invalid_owner)
class CleanlinessUtilitiesTest(SimpleTestCase):
def test_pop_ids(self):
five = set(range(5))
three = pop_ids(five, 3)
self.assertEqual(3, len(three))
self.assertEqual(2, len(five))
self.assertEqual(five | set(three), set(range(5)))
def test_pop_ids_too_many(self):
five = set(range(5))
back = pop_ids(five, 6)
self.assertEqual(5, len(back))
self.assertEqual(0, len(five))
self.assertEqual(set(back), set(range(5)))
class GetCaseFootprintInfoTest(TestCase):
@classmethod
def setUpClass(cls):
super(GetCaseFootprintInfoTest, cls).setUpClass()
delete_all_cases()
def setUp(self):
super(GetCaseFootprintInfoTest, self).setUp()
self.domain = 'domain'
self.owner_id = uuid.uuid4().hex
self.other_owner_id = uuid.uuid4().hex
self.factory = CaseFactory(self.domain)
@run_with_all_backends
def test_simple_footprint(self):
""" should only return open cases from user """
case = CaseStructure(case_id=uuid.uuid4().hex, attrs={'owner_id': self.owner_id})
closed_case = CaseStructure(case_id=uuid.uuid4().hex, attrs={'owner_id': self.owner_id, 'close': True})
other_case = CaseStructure(case_id=uuid.uuid4().hex, attrs={'owner_id': self.other_owner_id})
self.factory.create_or_update_cases([case, other_case, closed_case])
footprint_info = get_case_footprint_info(self.domain, self.owner_id)
self.assertEqual(footprint_info.all_ids, set([case.case_id]))
@run_with_all_backends
def test_footprint_with_parent(self):
""" should return open cases with parents """
parent = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id, 'close': True}
)
child = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.owner_id},
indices=[CaseIndex(parent)]
)
self.factory.create_or_update_cases([parent, child])
footprint_info = get_case_footprint_info(self.domain, self.owner_id)
self.assertEqual(footprint_info.all_ids, set([child.case_id, parent.case_id]))
self.assertEqual(footprint_info.base_ids, set([child.case_id]))
@run_with_all_backends
def test_footprint_with_extension(self):
"""
Extensions are brought in if the host case is owned;
Host case is brought in if the extension is owned
"""
host = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.owner_id}
)
extension = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(host, relationship=CASE_INDEX_EXTENSION)]
)
self.factory.create_or_update_cases([host, extension])
footprint_info = get_case_footprint_info(self.domain, self.owner_id)
self.assertEqual(footprint_info.all_ids, set([extension.case_id, host.case_id]))
self.assertEqual(footprint_info.base_ids, set([host.case_id]))
footprint_info = get_case_footprint_info(self.domain, self.other_owner_id)
self.assertEqual(footprint_info.all_ids, set([extension.case_id, host.case_id]))
self.assertEqual(footprint_info.base_ids, set([extension.case_id]))
@run_with_all_backends
def test_footprint_with_extension_of_parent(self):
""" Extensions of parents should be included """
parent = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id, 'close': True}
)
child = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.owner_id},
indices=[CaseIndex(parent)]
)
extension = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(parent, relationship=CASE_INDEX_EXTENSION)]
)
self.factory.create_or_update_cases([parent, child, extension])
footprint_info = get_case_footprint_info(self.domain, self.owner_id)
self.assertEqual(footprint_info.all_ids, set([extension.case_id, parent.case_id, child.case_id]))
@run_with_all_backends
def test_footprint_with_extension_of_child(self):
""" Extensions of children should be included """
parent = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id, 'close': True}
)
child = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.owner_id},
indices=[CaseIndex(parent)]
)
extension = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(child, relationship=CASE_INDEX_EXTENSION)]
)
self.factory.create_or_update_cases([parent, child, extension])
footprint_info = get_case_footprint_info(self.domain, self.owner_id)
self.assertEqual(footprint_info.all_ids, set([extension.case_id, parent.case_id, child.case_id]))
@run_with_all_backends
def test_cousins(self):
"""http://manage.dimagi.com/default.asp?189528
"""
grandparent = CaseStructure(
case_id="Steffon",
attrs={'owner_id': self.other_owner_id}
)
parent_1 = CaseStructure(
case_id="Stannis",
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(grandparent)]
)
parent_2 = CaseStructure(
case_id="Robert",
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(grandparent)]
)
child_1 = CaseStructure(
case_id="Shireen",
attrs={'owner_id': self.owner_id},
indices=[CaseIndex(parent_1)]
)
child_2 = CaseStructure(
case_id="Joffrey",
attrs={'owner_id': self.owner_id},
indices=[CaseIndex(parent_2)]
)
self.factory.create_or_update_cases([grandparent, parent_1, parent_2, child_1, child_2])
footprint_info = get_case_footprint_info(self.domain, self.owner_id)
self.assertEqual(
footprint_info.all_ids,
set([grandparent.case_id,
parent_1.case_id,
parent_2.case_id,
child_1.case_id,
child_2.case_id])
)
class GetDependentCasesTest(TestCase):
@classmethod
def setUpClass(cls):
super(GetDependentCasesTest, cls).setUpClass()
delete_all_cases()
def setUp(self):
super(GetDependentCasesTest, self).setUp()
self.domain = 'domain'
self.owner_id = uuid.uuid4().hex
self.other_owner_id = uuid.uuid4().hex
self.factory = CaseFactory(self.domain)
@run_with_all_backends
def test_returns_nothing_with_no_dependencies(self):
case = CaseStructure()
self.factory.create_or_update_case(case)
self.assertEqual(set(), get_dependent_case_info(self.domain, [case.case_id]).all_ids)
@run_with_all_backends
def test_returns_simple_extension(self):
host = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.owner_id}
)
extension = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(host, relationship=CASE_INDEX_EXTENSION)]
)
all_ids = set([host.case_id, extension.case_id])
self.factory.create_or_update_cases([host, extension])
self.assertEqual(all_ids, get_dependent_case_info(self.domain, [host.case_id]).all_ids)
self.assertEqual(all_ids, get_dependent_case_info(self.domain, [extension.case_id]).all_ids)
self.assertEqual(set([extension.case_id]),
get_dependent_case_info(self.domain, [host.case_id]).extension_ids)
@run_with_all_backends
def test_returns_extension_of_extension(self):
host = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.owner_id}
)
extension = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(host, relationship=CASE_INDEX_EXTENSION)]
)
extension_2 = CaseStructure(
case_id=uuid.uuid4().hex,
indices=[CaseIndex(extension, relationship=CASE_INDEX_EXTENSION)]
)
all_ids = set([host.case_id, extension.case_id, extension_2.case_id])
self.factory.create_or_update_cases([extension_2])
self.assertEqual(all_ids, get_dependent_case_info(self.domain, [host.case_id]).all_ids)
self.assertEqual(all_ids, get_dependent_case_info(self.domain, [extension.case_id]).all_ids)
self.assertEqual(all_ids, get_dependent_case_info(self.domain, [extension_2.case_id]).all_ids)
self.assertEqual(set([extension.case_id, extension_2.case_id]),
get_dependent_case_info(self.domain, [host.case_id]).extension_ids)
@run_with_all_backends
def test_children_and_extensions(self):
parent = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id, 'close': True}
)
child = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.owner_id},
indices=[CaseIndex(parent)]
)
extension = CaseStructure(
case_id=uuid.uuid4().hex,
attrs={'owner_id': self.other_owner_id},
indices=[CaseIndex(child, relationship=CASE_INDEX_EXTENSION)]
)
self.factory.create_or_update_cases([parent, child, extension])
all_ids = set([parent.case_id, child.case_id, extension.case_id])
self.assertEqual(all_ids, get_dependent_case_info(self.domain, [child.case_id]).all_ids)
self.assertEqual(set([]), get_dependent_case_info(self.domain, [parent.case_id]).all_ids)
self.assertEqual(set([extension.case_id]),
get_dependent_case_info(self.domain, [child.case_id]).extension_ids)
self.assertEqual(set([]),
get_dependent_case_info(self.domain, [parent.case_id]).extension_ids)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inventory handlers for Placement API."""
import copy
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.placement import util
from nova import exception
from nova.i18n import _
from nova import objects
RESOURCE_CLASS_IDENTIFIER = "^[A-Z0-9_]+$"
BASE_INVENTORY_SCHEMA = {
"type": "object",
"properties": {
"resource_provider_generation": {
"type": "integer"
},
"total": {
"type": "integer"
},
"reserved": {
"type": "integer"
},
"min_unit": {
"type": "integer"
},
"max_unit": {
"type": "integer"
},
"step_size": {
"type": "integer"
},
"allocation_ratio": {
"type": "number"
},
},
"required": [
"total",
"resource_provider_generation"
],
"additionalProperties": False
}
POST_INVENTORY_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
POST_INVENTORY_SCHEMA['properties']['resource_class'] = {
"type": "string",
"pattern": RESOURCE_CLASS_IDENTIFIER,
}
POST_INVENTORY_SCHEMA['required'].append('resource_class')
POST_INVENTORY_SCHEMA['required'].remove('resource_provider_generation')
PUT_INVENTORY_RECORD_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
PUT_INVENTORY_RECORD_SCHEMA['required'].remove('resource_provider_generation')
PUT_INVENTORY_SCHEMA = {
"type": "object",
"properties": {
"resource_provider_generation": {
"type": "integer"
},
"inventories": {
"type": "object",
"patternProperties": {
RESOURCE_CLASS_IDENTIFIER: PUT_INVENTORY_RECORD_SCHEMA,
}
}
},
"required": [
"resource_provider_generation",
"inventories"
],
"additionalProperties": False
}
# NOTE(cdent): We keep our own representation of inventory defaults
# and output fields, separate from the versioned object to avoid
# inadvertent API changes when the object defaults are changed.
OUTPUT_INVENTORY_FIELDS = [
'total',
'reserved',
'min_unit',
'max_unit',
'step_size',
'allocation_ratio',
]
INVENTORY_DEFAULTS = {
'reserved': 0,
'min_unit': 0,
'max_unit': 0,
'step_size': 1,
'allocation_ratio': 1.0
}
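# As an illustration only (hypothetical payload, not part of this module):
# a POST body supplying just the required fields is filled out with the
# defaults above by _extract_inventory(), e.g.
#   {"resource_class": "VCPU", "total": 8}
#   -> {"resource_class": "VCPU", "total": 8, "reserved": 0, "min_unit": 0,
#       "max_unit": 0, "step_size": 1, "allocation_ratio": 1.0}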
def _extract_inventory(body, schema):
"""Extract and validate inventory from JSON body."""
data = util.extract_json(body, schema)
inventory_data = copy.copy(INVENTORY_DEFAULTS)
inventory_data.update(data)
return inventory_data
def _extract_inventories(body, schema):
"""Extract and validate multiple inventories from JSON body."""
data = util.extract_json(body, schema)
inventories = {}
for res_class, raw_inventory in data['inventories'].items():
inventory_data = copy.copy(INVENTORY_DEFAULTS)
inventory_data.update(raw_inventory)
inventories[res_class] = inventory_data
data['inventories'] = inventories
return data
def _make_inventory_object(resource_provider, resource_class, **data):
"""Single place to catch malformed Inventories."""
# TODO(cdent): Some of the validation checks that are done here
# could be done via JSONschema (using, for example, "minimum":
# 0) for non-negative integers. It's not clear if that is
# duplication or decoupling so leaving it as this for now.
try:
inventory = objects.Inventory(
resource_provider=resource_provider,
resource_class=resource_class, **data)
except (ValueError, TypeError) as exc:
raise webob.exc.HTTPBadRequest(
_('Bad inventory %(class)s for resource provider '
'%(rp_uuid)s: %(error)s') % {'class': resource_class,
'rp_uuid': resource_provider.uuid,
'error': exc},
json_formatter=util.json_error_formatter)
return inventory
def _send_inventories(response, resource_provider, inventories):
"""Send a JSON representation of a list of inventories."""
response.status = 200
response.body = jsonutils.dumps(_serialize_inventories(
inventories, resource_provider.generation))
response.content_type = 'application/json'
return response
def _send_inventory(response, resource_provider, inventory, status=200):
"""Send a JSON representation of one single inventory."""
response.status = status
response.body = jsonutils.dumps(_serialize_inventory(
inventory, generation=resource_provider.generation))
response.content_type = 'application/json'
return response
def _serialize_inventory(inventory, generation=None):
"""Turn a single inventory into a dictionary."""
data = {
field: getattr(inventory, field)
for field in OUTPUT_INVENTORY_FIELDS
}
if generation:
data['resource_provider_generation'] = generation
return data
def _serialize_inventories(inventories, generation):
"""Turn a list of inventories in a dict by resource class."""
inventories_by_class = {inventory.resource_class: inventory
for inventory in inventories}
inventories_dict = {}
for resource_class, inventory in inventories_by_class.items():
inventories_dict[resource_class] = _serialize_inventory(
inventory, generation=None)
return {'resource_provider_generation': generation,
'inventories': inventories_dict}
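# For illustration (hypothetical values), the dict returned above has the
# shape:
#   {"resource_provider_generation": 1,
#    "inventories": {"VCPU": {"total": 8, "reserved": 0, ...},
#                    "MEMORY_MB": {"total": 1024, "reserved": 256, ...}}}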
@webob.dec.wsgify
@util.require_content('application/json')
def create_inventory(req):
"""POST to create one inventory.
On success return a 201 response, a location header pointing
to the newly created inventory and an application/json representation
of the inventory.
"""
context = req.environ['placement.context']
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
data = _extract_inventory(req.body, POST_INVENTORY_SCHEMA)
resource_class = data.pop('resource_class')
inventory = _make_inventory_object(resource_provider,
resource_class,
**data)
try:
resource_provider.add_inventory(inventory)
except (exception.ConcurrentUpdateDetected,
db_exc.DBDuplicateEntry) as exc:
raise webob.exc.HTTPConflict(
_('Update conflict: %(error)s') % {'error': exc},
json_formatter=util.json_error_formatter)
except exception.InvalidInventoryCapacity as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to create inventory for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc},
json_formatter=util.json_error_formatter)
response = req.response
response.location = util.inventory_url(
req.environ, resource_provider, resource_class)
return _send_inventory(response, resource_provider, inventory,
status=201)
@webob.dec.wsgify
def delete_inventory(req):
"""DELETE to destroy a single inventory.
If the inventory is in use or resource provider generation is out
of sync return a 409.
On success return a 204 and an empty body.
"""
context = req.environ['placement.context']
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_class = util.wsgi_path_item(req.environ, 'resource_class')
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
try:
resource_provider.delete_inventory(resource_class)
except (exception.ConcurrentUpdateDetected,
exception.InventoryInUse) as exc:
raise webob.exc.HTTPConflict(
_('Unable to delete inventory of class %(class)s: %(error)s') %
{'class': resource_class, 'error': exc},
json_formatter=util.json_error_formatter)
response = req.response
response.status = 204
response.content_type = None
return response
@webob.dec.wsgify
@util.check_accept('application/json')
def get_inventories(req):
"""GET a list of inventories.
On success return a 200 with an application/json body representing
a collection of inventories.
"""
context = req.environ['placement.context']
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
context, resource_provider.uuid)
return _send_inventories(req.response, resource_provider, inventories)
@webob.dec.wsgify
@util.check_accept('application/json')
def get_inventory(req):
"""GET one inventory.
    On success return a 200 with an application/json body representing one
inventory.
"""
context = req.environ['placement.context']
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_class = util.wsgi_path_item(req.environ, 'resource_class')
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
inventory = objects.InventoryList.get_all_by_resource_provider_uuid(
context, resource_provider.uuid).find(resource_class)
if not inventory:
raise webob.exc.HTTPNotFound(
_('No inventory of class %(class)s for %(rp_uuid)s') %
{'class': resource_class, 'rp_uuid': resource_provider.uuid},
json_formatter=util.json_error_formatter)
return _send_inventory(req.response, resource_provider, inventory)
@webob.dec.wsgify
@util.require_content('application/json')
def set_inventories(req):
"""PUT to set all inventory for a resource provider.
Create, update and delete inventory as required to reset all
the inventory.
If the resource generation is out of sync, return a 409.
If an inventory to be deleted is in use, return a 409.
If any inventory to be created or updated has settings which are
invalid (for example reserved exceeds capacity), return a 400.
On success return a 200 with an application/json body representing
the inventories.
"""
context = req.environ['placement.context']
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
data = _extract_inventories(req.body, PUT_INVENTORY_SCHEMA)
if data['resource_provider_generation'] != resource_provider.generation:
raise webob.exc.HTTPConflict(
_('resource provider generation conflict'),
json_formatter=util.json_error_formatter)
inv_list = []
for res_class, inventory_data in data['inventories'].items():
inventory = _make_inventory_object(
resource_provider, res_class, **inventory_data)
inv_list.append(inventory)
inventories = objects.InventoryList(objects=inv_list)
try:
resource_provider.set_inventory(inventories)
except (exception.ConcurrentUpdateDetected,
exception.InventoryInUse,
db_exc.DBDuplicateEntry) as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
json_formatter=util.json_error_formatter)
except exception.InvalidInventoryCapacity as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to update inventory for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc},
json_formatter=util.json_error_formatter)
return _send_inventories(req.response, resource_provider, inventories)
@webob.dec.wsgify
@util.require_content('application/json')
def update_inventory(req):
"""PUT to update one inventory.
If the resource generation is out of sync, return a 409.
If the inventory has settings which are invalid (for example
reserved exceeds capacity), return a 400.
On success return a 200 with an application/json body representing
the inventory.
"""
context = req.environ['placement.context']
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_class = util.wsgi_path_item(req.environ, 'resource_class')
resource_provider = objects.ResourceProvider.get_by_uuid(
context, uuid)
data = _extract_inventory(req.body, BASE_INVENTORY_SCHEMA)
if data['resource_provider_generation'] != resource_provider.generation:
raise webob.exc.HTTPConflict(
_('resource provider generation conflict'),
json_formatter=util.json_error_formatter)
inventory = _make_inventory_object(resource_provider,
resource_class,
**data)
try:
resource_provider.update_inventory(inventory)
except (exception.ConcurrentUpdateDetected,
db_exc.DBDuplicateEntry) as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
json_formatter=util.json_error_formatter)
except exception.InvalidInventoryCapacity as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to update inventory for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc},
json_formatter=util.json_error_formatter)
return _send_inventory(req.response, resource_provider, inventory)
|
|
#! /opt/rocks/bin/python
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 6.1.1 (Sand Boa)
#
# Copyright (c) 2000 - 2014 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:[email protected]
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: clusterdb.py,v $
# Revision 1.24 2012/11/27 00:48:40 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.23 2012/05/06 05:48:46 phil
# Copyright Storm for Mamba
#
# Revision 1.22 2011/07/23 02:30:49 phil
# Viper Copyright
#
# Revision 1.21 2010/09/07 23:53:08 bruno
# star power for gb
#
# Revision 1.20 2009/05/08 22:14:34 anoop
# Add os attribute to the node_attributes table
#
# Revision 1.19 2009/05/01 19:07:08 mjk
# chimi con queso
#
# Revision 1.18 2009/03/23 23:03:57 bruno
# can build frontends and computes
#
# Revision 1.17 2008/10/18 00:56:02 mjk
# copyright 5.1
#
# Revision 1.16 2008/07/23 00:29:55 anoop
# Modified the database to support per-node OS field. This will help
# determine the kind of provisioning for each node
#
# Modification to insert-ethers, rocks command line, and pylib to
# support the same.
#
# Revision 1.15 2008/03/06 23:41:44 mjk
# copyright storm on
#
# Revision 1.14 2007/06/23 04:03:24 mjk
# mars hill copyright
#
# Revision 1.13 2007/06/09 00:27:08 anoop
# Again, moving away from using device names, to using subnets.
#
# Revision 1.12 2007/06/06 20:28:00 bruno
# need to set the device to eth0 when insert-ethers discovers a redhat rocks
# machine
#
# Revision 1.11 2007/06/05 16:38:37 anoop
# Modified clusterdb.py to accomadate changes to the database schema. Now
# just a little less buggy than my other checkins. Sorry for the delay
#
# Revision 1.10 2007/05/30 20:43:15 anoop
# *** empty log message ***
#
# Revision 1.9 2006/09/11 22:47:23 mjk
# monkey face copyright
#
# Revision 1.8 2006/08/10 00:09:41 mjk
# 4.2 copyright
#
# Revision 1.7 2006/01/16 06:48:59 mjk
# fix python path for source built foundation python
#
# Revision 1.6 2005/10/12 18:08:42 mjk
# final copyright for 4.1
#
# Revision 1.5 2005/09/16 01:02:21 mjk
# updated copyright
#
# Revision 1.4 2005/07/11 23:51:35 mjk
# use rocks version of python
#
# Revision 1.3 2005/05/24 21:21:57 mjk
# update copyright, release is not any closer
#
# Revision 1.2 2005/05/23 23:59:24 fds
# Frontend Restore
#
# Revision 1.1 2005/03/31 02:32:09 fds
# Initial design. Knows how to safely insert nodes into the cluster db.
#
#
import os
class Nodes:
"""A class that knows how to insert/delete rocks appliances from
the cluster database"""
def __init__(self, sql):
# An open connection to the cluster database. (a rocks.sql.App)
self.sql = sql
self.nodeid = -1
def getNodeId(self):
"Returns the id of the last node inserted"
return self.nodeid
def insert(self, name, mid, rack, rank, mac=None, ip=None,
netmask=None, subnet='private', osname='linux', vlanid=None):
"""Inserts a new node into the database. Optionally inserts
networking information as well."""
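        # Example usage (hypothetical values; 'app' is an open rocks.sql.App):
        #   nodes = Nodes(app)
        #   nodes.insert('compute-0-0', mid=3, rack=0, rank=0,
        #                mac='00:11:22:33:44:55', ip='10.1.255.254',
        #                netmask='255.255.0.0')
        #   nodeid = nodes.getNodeId()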
nodeid, mac_exists = self.checkNameAndMAC(name, mac, subnet, vlanid)
self.checkMembership(mid)
self.checkIP(ip)
self.checkSubnet(subnet)
if(not nodeid):
#
# create a new row in nodes table
#
insert = ('insert into nodes (name,membership,rack,rank,os) '
'values ("%s", %d, %d, %d, "%s") ' %
(name, mid, rack, rank, osname))
self.sql.execute(insert)
# The last insert id.
nodeid = self.sql.insertId()
# Set the value of the OS in the host attributes table
db_cmd = ('insert into node_attributes '
'(node, attr, value) '
'values (%d, "%s","%s")' % (nodeid, 'os', osname))
self.sql.execute(db_cmd)
# Do not go further if there is no networking info.
if ip is None:
return
#
# now create a new row in the networks table
#
# First get the subnet you want to insert the node into. The
# default is "private", but this should be dynamic enough
# to accept any kind of string that is valid
self.sql.execute("select id from subnets where name='%s'"
% (subnet))
subnet_id = int(self.sql.fetchone()[0])
if mac is None:
# Happens for a frontend
insert = ('insert into networks '
'(node,ip,netmask,name,subnet) '
'values (%d, "%s", "%s", "%s", %d) '
% (nodeid, ip, netmask, name, subnet_id))
else:
insert = ('insert into networks '
'(node,mac,ip,netmask,name,subnet) '
'values (%d, "%s", "%s", "%s", "%s", %d) '
% (nodeid, mac, \
ip, netmask, name, subnet_id))
self.sql.execute(insert)
self.nodeid = nodeid
sql_command = 'update networks set device=\'%s\' where node=%d and subnet=%d'%(subnet, int(nodeid), int(subnet_id))
self.sql.execute(sql_command);
if(vlanid):
sql_command = 'update networks set vlanid=%d where node=%d and subnet=%d'%(int(vlanid), int(nodeid), int(subnet_id))
self.sql.execute(sql_command);
def checkNameAndMAC(self, checkname, mac, subnet, vlanid):
"""Check to make sure we don't insert a duplicate node name or
other bad things into the DB"""
msg = self.checkNameValidity(checkname)
if msg :
raise ValueError, msg
host = self.sql.getNodeId(checkname)
if mac:
query = 'select mac from networks where mac = "%s"' % mac
self.sql.execute(query)
if(not self.sql.fetchone()):
mac = None;
if not host and not mac:
return None, None;
if not host and mac:
msg = 'Node '+checkname+' does not exist but MAC '+mac+' found in DB';
raise ValueError, msg;
if host and subnet:
sql_command = """select networks.node from networks,subnets where subnets.name='%s' and
subnets.id=networks.subnet and networks.node=%d"""%(subnet, int(host))
self.sql.execute(sql_command);
if(self.sql.fetchone()): #node already exists in subnet, error
msg = 'Node '+checkname+' already exists in the network '+subnet;
raise ValueError, msg;
if(mac):
sql_command = """select networks.node from networks,subnets where subnets.name='%s' and
subnets.id=networks.subnet and networks.mac='%s'"""%(subnet, str(mac))
self.sql.execute(sql_command);
if(self.sql.fetchone()): #MAC already exists in subnet, error
msg = 'MAC '+str(mac)+' already exists in the network '+subnet;
raise ValueError, msg;
return host, mac
def checkName(self, checkname):
"""Check to make sure we don't insert a duplicate node name or
other bad things into the DB"""
host = self.sql.getNodeId(checkname)
if host:
msg = 'Node %s already exists.\n' % checkname
msg += 'Select a different hostname, cabinet '
msg += 'and/or rank value.'
raise ValueError, msg
msg = self.checkNameValidity(checkname)
if msg :
raise ValueError, msg
def checkNameValidity(self, checkname):
"""check that the checkname is not an appliance name or it is not
in the form of rack<number> (used by rocks.command.* too).
If it is incorrect it return an error string otherwise None
"""
# check for invalid names for hosts
# they can not be in the form of rack<number>
if checkname.startswith('rack'):
number = checkname.split('rack')[1]
try:
int(number)
msg = ('Hostname %s can not be in the form ' \
+ 'of rack<number>.\n') % checkname
msg += 'Select a different hostname.\n'
return msg
except ValueError:
return None
# they can not be equal to any appliance name
self.sql.execute('select name from appliances')
for name, in self.sql.fetchall():
if checkname == name:
msg = 'Hostname %s can not be equal to an appliance'\
' name.\n' % (checkname)
msg += 'Select a different hostname.\n'
return msg
return None
def checkSubnet(self,subnet):
"Check to see if the subnet exists"
rows = self.sql.execute("select id from subnets where name='%s'" % subnet);
if (rows == 0):
msg = "subnet %s does not exist. Bailing out" % (subnet)
raise KeyError, msg
return
def checkIP(self, ipaddr):
"Check if the address is already in the database"
if ipaddr is None:
return
nodeid = self.sql.getNodeId(ipaddr)
if nodeid:
msg = "Duplicate IP '%s' Specified" % ipaddr
raise ValueError, msg
def checkMAC(self, mac):
"""Mac addresses are unique accross all sites."""
#
# check if mac is already in the database
# Special Handling for literal "None"
if mac is None:
return
query = 'select mac from networks where mac = "%s"' % mac
if self.sql.execute(query) == 1:
msg = "Duplicate MAC '%s' Specified" % mac
raise ValueError, msg
def checkMembershipName(self, name):
query='select name from memberships where name="%s" ' % (name)
if self.sql.execute(query) == 0:
msg = 'Could not find Membership "%s"' % name
raise ValueError, msg
def checkMembership(self, mid):
query='select id from memberships where id="%s"' % mid
if self.sql.execute(query) == 0:
msg = 'Invalid Membership ID "%s"' % mid
raise ValueError, msg
|
|
"""Subsystems vocabulary from http://ontology.projectchronos.eu/subsystems/?format=jsonld."""
# Listed classes:
# "Spacecraft_Communication": models.Classes(name="Spacecraft_Communication"),
# "Spacecraft_Propulsion": models.Classes(name="Spacecraft_Propulsion"),
# "Spacecraft_Detector": models.Classes(name="Spacecraft_Detector"),
# "Spacecraft_PrimaryPower": models.Classes(name="Spacecraft_PrimaryPower"),
# "Spacecraft_BackupPower": models.Classes(name="Spacecraft_BackupPower"),
# "Spacecraft_Thermal": models.Classes(name="Spacecraft_Thermal"),
# "Spacecraft_Structure": models.Classes(name="Spacecraft_Structure"),
# "Spacecraft_CDH": models.Classes(name="Spacecraft_CDH"),
# "Spacecraft_AODCS": models.Classes(name="Spacecraft_AODCS"),
# "Spacecraft": models.Classes(name="Spacecraft"),
# "Subsystem_Spacecraft": models.Classes(name="Subsystem_Spacecraft"), # all the subsystems types, except detectors (or experiments)
# "Payload_Spacecraft": models.Classes(name="Payload_Spacecraft") # Detectors are payload not strictly subssytems
subsystem_data = {
"defines": [
{
"@type": [
{
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/manufacturer",
"rdf:label": "manufacturer",
"owl:sameAs": {
"@id": "http://sw.opencyc.org/2012/05/10/concept/Mx8Ngh4rvkpzWpwpEbGdrcN5Y29ycB4rvViQlZwpEbGdrcN5Y29ycA"
}
},
{
"@type": [
{
"@id": "http://www.w3.org/2002/07/owl#AsymmetricProperty"
},
{
"@id": "http://www.w3.org/2002/07/owl#IrreflexiveProperty"
},
{
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
}
],
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:comment": "The function and the objective what the device performs or make possible",
"rdf:label": "function",
"@id": "http://ontology.projectchronos.eu/subsystems/function",
"skos:altLabel": "is used for"
},
{
"@type": "http://www.w3.org/2002/07/owl#ObjectProperty",
"@id": "http://ontology.projectchronos.eu/subsystems/cubicMillimeters",
"rdf:comment": "unit of measure for volume",
"rdf:label": "cubicMillimeters"
},
{
"skos:prefLabel": "A property that references the subsystem to the kind of the devices it holds.",
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:comment": "Every subsystem contains an homogeneous group of devices.",
"rdf:label": "subSystemType",
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
{
"skos:prefLabel": "A property that references the standard platform for which the subsystem has been designed.",
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:comment": "There are many different standards that imply quantitative and qualitative differences",
"rdf:label": "isStandard",
"@id": "http://ontology.projectchronos.eu/subsystems/isStandard"
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "hasVolume",
"rdf:range": [
{
"@id": "http://www.w3.org/2001/XMLSchema#float"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/hasVolume",
"owl:sameAs": [
{
"@id": "http://umbel.org/umbel/rc/Volume"
},
{
"@id": "http://live.dbpedia.org/data/Volume.ntriples"
},
{
"@id": "http://sw.opencyc.org/2012/05/10/concept/Mx4rvVju5JwpEbGdrcN5Y29ycA"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "hasMinAmpere",
"rdf:range": [
{
"@id": "http://www.w3.org/2001/XMLSchema#float"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/hasMinAmpere",
"owl:sameAs": [
{
"@id": "http://umbel.org/umbel/rc/Ampere"
},
{
"@id": "http://live.dbpedia.org/data/Ampere.ntriples"
},
{
"@id": "http://sw.opencyc.org/2012/05/10/concept/Mx4rvVieG5wpEbGdrcN5Y29ycA"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "hasMaxAmpere",
"rdf:range": [
{
"@id": "http://www.w3.org/2001/XMLSchema#float"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/hasMaxAmpere",
"owl:sameAs": [
{
"@id": "http://umbel.org/umbel/rc/Ampere"
},
{
"@id": "http://live.dbpedia.org/data/Ampere.ntriples"
},
{
"@id": "http://sw.opencyc.org/2012/05/10/concept/Mx4rvVieG5wpEbGdrcN5Y29ycA"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "hasMass",
"rdf:range": [
{
"@id": "http://www.w3.org/2001/XMLSchema#float"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/hasMass",
"owl:sameAs": [
{
"@id": "http://umbel.org/umbel/rc/Mass"
},
{
"@id": "http://live.dbpedia.org/data/Mass.ntriples"
},
{
"@id": "http://schema.org/Mass"
},
{
"@id": "http://sw.opencyc.org/2012/05/10/concept/Mx4rvVjb5pwpEbGdrcN5Y29ycA"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "minWorkingTemperature",
"rdf:range": [
{
"@id": "http://sw.opencyc.org/2012/05/10/concept/en/DegreeCelsius"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature",
"owl:sameAs": [
{
"@id": "http://umbel.org/umbel/rc/Temperature"
},
{
"@id": "http://live.dbpedia.org/data/Temperature.ntriples"
},
{
"@id": "http://sw.opencyc.org/concept/Mx4rvVixf5wpEbGdrcN5Y29ycA"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "maxWorkingTemperature",
"rdf:range": [
{
"@id": "http://sw.opencyc.org/2012/05/10/concept/en/DegreeCelsius"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature",
"owl:sameAs": [
{
"@id": "http://umbel.org/umbel/rc/Temperature"
},
{
"@id": "http://live.dbpedia.org/data/Temperature.ntriples"
},
{
"@id": "http://sw.opencyc.org/concept/Mx4rvVixf5wpEbGdrcN5Y29ycA"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "hasPower",
"rdf:range": [
{
"@id": "http://www.w3.org/2001/XMLSchema#float"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/hasPower",
"owl:sameAs": [
{
"@id": "http://umbel.org/umbel/rc/Power"
},
{
"@id": "http://live.dbpedia.org/data/Power_(physics).ntriples"
},
{
"@id": "http://sw.opencyc.org/2012/05/10/concept/Mx4rvVjcq5wpEbGdrcN5Y29ycA"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:label": "hasSpecificImpulse",
"rdf:range": [
{
"@id": "http://www.w3.org/2001/XMLSchema#float"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/hasSpecificImpulse",
"owl:sameAs": {
"@id": "http://live.dbpedia.org/data/Specific_impulse.ntriples"
}
},
{
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft_Detector"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/holdsSensor",
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:label": "holdsSensor"
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:comment": "Amount of money it can be bought for, or an esteem of value.",
"rdf:label": "hasMonetaryValue",
"rdf:range": [
{
"@id": "http://www.w3.org/2001/XMLSchema#float"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/hasMonetaryValue",
"owl:sameAs": [
{
"@id": "http://live.dbpedia.org/data/Price.ntriples"
},
{
"@id": "http://umbel.org/umbel/rc/MonetaryValue"
},
{
"@id": "http://schema.org/PriceSpecification"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:comment": "This device receive input from another device",
"rdf:label": "hasWireInWith",
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith",
"rdf:range": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
]
},
{
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
],
"rdf:comment": "This device send output to another device",
"rdf:label": "hasWireOutWith",
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireOutWith",
"rdf:range": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
}
]
},
{
"rdf:domain": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft_Propulsion"
}
],
"@id": "http://ontology.projectchronos.eu/subsystems/typeOfPropellant",
"rdf:label": "typeOfPropellant",
"@type": {
"@id": "http://www.w3.org/2002/07/owl#ObjectProperty"
},
"rdf:comment": "Type of fueling used by a spacecraft rocket engine"
},
{
"skos:prefLabel": "Any scientific instrument carried by a space probe or an artificial satellite",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Detector",
"chronos:relConcept": {
"@id": "http://hypermedia.projectchronos.eu/data/dbpediadocs/sensor"
},
"rdf:comment": "A space detector is a sensor supported by another device that let it collect data, that is deployed into a spacecraft and works outside Earth lower atmosphere",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Payload_Spacecraft"
},
"rdf:label": "Spacecraft_Detector",
"rdfs:subClassOf": [
{
"@id": "http://umbel.org/umbel/rc/Sensor_Device"
},
{
"@id": "http://ontology.projectchronos.eu/spacecraft/SubSystems_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/objective"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/isComponentOf"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Payload_Spacecraft"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireOutWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Communication"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
}
]
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasVoltage"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
}
]
},
{
"skos:prefLabel": "The set of subsystems needed to make a spacecraft moving in space",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Propulsion",
"chronos:relConcept": {
"@id": "http://hypermedia.projectchronos.eu/data/dbpediadocs/propulsion"
},
"rdf:comment": "Complex devices-subsystems used for impelling (processes of applying a force which results in translational motion) a spacecraft, in the specific http://umbel.org/umbel/rc/ProjectilePropelling",
"spacecraft:isComponentOf": "http://ontology.projectchronos.eu/spacecraft/Spacecraft",
"rdf:label": "Spacecraft_Propulsion",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/ProjectilePropelling"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/spacecraft/isSubsystemOf"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/typeOfPropellant"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasSpecificImpulse"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/RocketEngine"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
}
]
}
]
},
{
"skos:prefLabel": "The set of subsystems needed to make a spacecraft to collect energy to operate",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower",
"rdf:comment": "Complex devices-subsystems used for collecting energy.",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
},
"rdf:label": "Spacecraft_PrimaryPower",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/ElectricalPowerGeneration"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/spacecraft/isSubsystemOf"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasEfficiency"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasVoltage"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/ElectricalDevice"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireOutWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Communication"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_AODCS"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Propulsion"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Thermal"
}
]
}
]
},
{
"skos:prefLabel": "The set of subsystems needed to make a spacecraft to store energy from the primary power source.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_BackupPower",
"rdf:comment": "Complex devices-subsystems used for storing energy.",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
},
"rdf:label": "Spacecraft_BackupPower",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://live.dbpedia.org/data/rc/Energy_storage.ntriples"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/spacecraft/isSubsystemOf"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/ElectricalDevice"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower"
}
]
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireOutWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Propulsion"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Thermal"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Communication"
}
]
}
]
},
{
"skos:prefLabel": "Artifacts or devices used to maintain the temperature of spacecraft's subsystems and payloads into a given range, to permit nominal and survival mode for all the duration of the mission .",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Thermal",
"rdf:comment": "Shields, shells or any device insulation from/reflecting radiation exploiting emission and absorption events",
"rdf:label": "Spacecraft_Thermal",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://live.dbpedia.org/data/Process_control.ntriples"
}
}
]
},
{
"skos:prefLabel": "Device of of class Thermal that does NOT consume energy from the spacecraft power source.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Thermal_PassiveDevice",
"rdf:comment": "They are passive because they mostly transform radiation into heating/cooling ",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
},
"rdf:label": "Spacecraft_Thermal_PassiveDevice",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Thermal"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/PhysicalDevice"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
}
]
},
{
"skos:prefLabel": "Device that of class Thermal that consumes energy from the spacecraft power source.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Thermal_ActiveDevice",
"rdf:comment": "Complex devices-subsystems used to protect sensors or electronic devices from over/under-heating, like refrigeration absorption.",
"rdf:label": "Spacecraft_Thermal_ActiveDevice",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft_Thermal"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/PoweredDevice"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_BackupPower"
}
]
}
]
},
{
"skos:prefLabel": "Artifacts or rigid devices used to create a supporting structure for all the others devices.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Structure",
"rdf:comment": "It's the skeleton and framework of the spacecraft.",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
},
"rdf:label": "Spacecraft_Structure",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://live.dbpedia.org/data/Structural_system.ntriples"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/spacecraft/isSubsystemOf"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/standsMaxTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/PhysicalDevice"
}
}
]
},
{
"skos:prefLabel": "Command and Data Handling, it is the device that connects the other devices, it processes and deliver information.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH",
"rdf:comment": "The DH system shall: Enable HK and science data flow \u2013 Housekeeping data (Temperatures, Pressures, Voltages, Currents, Status,...) \u2013 Attitude data \u2013 Payload data (e.g., Science data) - Receive and distribute commands - Perform TM and TC protocols - Distribute timing signals - Synchronization of data \u2013 Time stamping of data - Provide data storage - Execute commands and schedules - Control subsystems and payloads - Monitor spacecraft health - Make autonomous decisions - Perform data compression.",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
},
"rdf:label": "Spacecraft_CDH",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://umbel.org/umbel/rc/InformationTransferEvent"
},
{
"@id": "http://live.dbpedia.org/data/Electronic_data_processing.ntriples"
},
{
"@id": "http://live.dbpedia.org/data/Process_control.ntriples"
}
]
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/spacecraft/isSubsystemOf"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasVoltage"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasMaxClock"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasMinClock"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasDataStorage"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasDataStorageExternal"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasRAM"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasMinTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasMaxTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/Computer_hardware"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireOutWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Communication"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_AODCS"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Propulsion"
}
]
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_BackupPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Communication"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_AODCS"
}
]
}
]
},
{
"skos:prefLabel": "It handles communication from/to ground base or other spacecraft.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_Communication",
"chronos:relConcept": {
"@id": "http://hypermedia.projectchronos.eu/data/dbpediadocs/telecommunication"
},
"rdf:comment": "Complex devices-subsystems used for transmitting/receiving radio waves.",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
},
"rdf:label": "Spacecraft_Communication",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://live.dbpedia.org/data/Transmitting.ntriples"
},
{
"@id": "http://umbel.org/umbel/rc/Receiving"
}
]
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/spacecraft/isSubsystemOf"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasMinTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasMaxTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/ElectronicDevice"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireOutWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
}
]
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_BackupPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
}
]
}
]
},
{
"skos:prefLabel": "Attitude and Orbit Determination Control",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_AODCS",
"rdf:comment": "Complex devices-subsystems used to set the direction and the position of the spacecraft, it controls flight dynamics.",
"spacecraft:isComponentOf": {
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft"
},
"rdf:label": "Spacecraft_AODCS",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Subsystem_Spacecraft"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsytems/function"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://live.dbpedia.org/data/Flight_dynamics_(spacecraft).ntriples"
},
{
"@id": "http://live.dbpedia.org/data/Attitude_control.ntriples"
},
{
"@id": "http://live.dbpedia.org/data/Process_control.ntriples"
}
]
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/standsMaxTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/maxWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/minWorkingTemperature"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:minCardinality": 1
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/subSystemType"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": {
"@id": "http://umbel.org/umbel/rc/ElectronicDevice"
}
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireOutWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
}
]
}
]
},
{
"skos:prefLabel": "AODCS that do not use any power from the spacecraft power to work.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_AODCS_ActiveDevice",
"rdf:comment": "Do NOT use any additional power from the spacecraft generator",
"rdf:label": "Spacecraft_AODCS_Active",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft_AODCS"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_PrimaryPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_BackupPower"
},
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
}
]
}
]
},
{
"skos:prefLabel": "AODCS that do not use any power from the spacecraft power to work.",
"@type": "http://www.w3.org/2002/07/owl#Class",
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_AODCS_PassiveDevice",
"rdf:comment": "DO use any additional power from the spacecraft generator",
"rdf:label": "Spacecraft_AODCS_PassiveDevice",
"rdfs:subClassOf": [
{
"@id": "http://ontology.projectchronos.eu/spacecraft/Spacecraft_AODCS"
},
{
"owl:onProperty": {
"@id": "http://ontology.projectchronos.eu/subsystems/hasWireInWith"
},
"@type": "http://www.w3.org/2002/07/owl#Restriction",
"owl:hasValue": [
{
"@id": "http://ontology.projectchronos.eu/subsystems/Spacecraft_CDH"
}
]
}
]
}
],
"@type": "http://www.w3.org/2002/07/owl#Ontology",
"@id": "",
"rdf:comment": "These ontology contains classes of different subsystems present in a spacecraft.",
"@context": {
"defines": {
"@reverse": "http://www.w3.org/2000/01/rdf-schema#isDefinedBy"
},
"chronos": "http://ontology.projectchronos.eu/chronos/",
"skos": "http://www.w3.org/2004/02/skos/core#",
"xml": "http://www.w3.org/2001/XMLSchema#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"dbpedia": "http://live.dbpedia.org/ontology/",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"@base": "http://ontology.projectchronos.eu/subsystems",
"owl": "http://www.w3.org/2002/07/owl#",
"spacecraft": "http://ontology.projectchronos.eu/spacecraft/"
},
"rdf:label": "Subsystems that run tasks that are needed for the spacecraft to be working and fulfilling the objectives of the mission. Words separated by _ has to be read as separated concepts, camelCase in they are the same concepts (see umbel.org for this norm)"
}
#!/usr/bin/python2
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Basic test module
"""
import uuid
import time
from unittest import TestCase
from ovs.dal.exceptions import ObjectNotFoundException, ConcurrencyException, LinkedObjectException, MissingMandatoryFieldsException
from ovs.dal.dataobjectlist import DataObjectList
from ovs.extensions.generic import fakesleep
from ovs.extensions.storage.persistent.dummystore import DummyPersistentStore
from ovs.extensions.storage.volatile.dummystore import DummyVolatileStore
from ovs.extensions.storage.persistentfactory import PersistentFactory
from ovs.extensions.storage.volatilefactory import VolatileFactory
from ovs.dal.hybrids.t_testmachine import TestMachine
from ovs.dal.hybrids.t_testdisk import TestDisk
from ovs.dal.hybrids.t_testemachine import TestEMachine
from ovs.dal.datalist import DataList
from ovs.dal.helpers import Descriptor
from ovs.extensions.generic.volatilemutex import VolatileMutex
class Basic(TestCase):
"""
The basic unittest suite tests all basic functionality of the DAL framework.
It will also try accessing all dynamic properties of all hybrids, making sure
that code actually works. This, however, means that all loaded 3rd-party
libraries need to be mocked
"""
@classmethod
def setUpClass(cls):
"""
Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
This makes sure the unittests can be executed without those libraries installed
"""
PersistentFactory.store = DummyPersistentStore()
PersistentFactory.store.clean()
PersistentFactory.store.clean()
VolatileFactory.store = DummyVolatileStore()
VolatileFactory.store.clean()
VolatileFactory.store.clean()
fakesleep.monkey_patch()
@classmethod
def setUp(cls):
"""
(Re)Sets the stores on every test
"""
PersistentFactory.store = DummyPersistentStore()
PersistentFactory.store.clean()
VolatileFactory.store = DummyVolatileStore()
VolatileFactory.store.clean()
DataList.test_hooks = {}
@classmethod
def tearDownClass(cls):
"""
Clean up the unittest
"""
fakesleep.monkey_restore()
def test_invalidobject(self):
"""
Validates the behavior when a non-existing object is loaded
"""
# Loading a non-existing object should raise
self.assertRaises(ObjectNotFoundException, TestDisk, uuid.uuid4(), None)
def test_newobject_delete(self):
"""
Validates the behavior on object deletions
"""
disk = TestDisk()
disk.name = 'disk'
disk.save()
# An object should always have a guid
guid = disk.guid
self.assertIsNotNone(guid, 'Guid should not be None')
# After deleting, the object should not be retrievable
disk.delete()
self.assertRaises(Exception, TestDisk, guid, None)
def test_discard(self):
"""
Validates the behavior regarding pending changes discard
"""
disk = TestDisk()
disk.name = 'one'
disk.save()
disk.name = 'two'
# Discarding an object should rollback all changes
disk.discard()
self.assertEqual(disk.name, 'one', 'Data should be discarded')
def test_updateproperty(self):
"""
Validates the behavior regarding updating properties
"""
disk = TestDisk()
disk.name = 'test'
disk.description = 'desc'
# A property should be writable
self.assertEqual(disk.name, 'test', 'Property should be updated')
self.assertEqual(disk.description, 'desc', 'Property should be updated')
def test_preinit(self):
"""
Validates whether initial data is loaded on object creation
"""
disk = TestDisk(data={'name': 'diskx'})
disk.save()
self.assertEqual(disk.name, 'diskx', 'Disk name should be preloaded')
def test_datapersistent(self):
"""
Validates whether data is persisted correctly
"""
disk = TestDisk()
guid = disk.guid
disk.name = 'test'
disk.save()
# Retrieving an object should return the data as when it was saved
disk2 = TestDisk(guid)
self.assertEqual(disk.name, disk2.name, 'Data should be persistent')
def test_readonlyproperty(self):
"""
Validates whether all dynamic properties are actually read-only
"""
disk = TestDisk()
# Readonly properties should return data
self.assertIsNotNone(disk.used_size, 'RO property should return data')
def test_datastorewins(self):
"""
Validates the "datastore_wins" behavior in the usecase where it wins
"""
disk = TestDisk()
disk.name = 'initial'
disk.save()
disk2 = TestDisk(disk.guid, datastore_wins=True)
disk.name = 'one'
disk.save()
disk2.name = 'two'
disk2.save()
# With datastore_wins set to True, the datastore wins concurrency conflicts
self.assertEqual(disk2.name, 'one', 'Data should be overwritten')
def test_datastoreloses(self):
"""
Validates the "datastore_wins" behavior in the usecase where it loses
"""
disk = TestDisk()
disk.name = 'initial'
disk.save()
disk2 = TestDisk(disk.guid, datastore_wins=False)
disk.name = 'one'
disk.save()
disk2.name = 'two'
disk2.save()
# With datastore_wins set to False, the datastore loses concurrency conflicts
self.assertEqual(disk2.name, 'two', 'Data should not be overwritten')
def test_silentdatarefresh(self):
"""
Validates whether the default scenario (datastore_wins=False) will execute silent
data refresh
"""
disk = TestDisk()
disk.name = 'initial'
disk.save()
disk2 = TestDisk(disk.guid, datastore_wins=False)
disk.name = 'one'
disk.save()
disk2.name = 'two'
disk2.save()
disk.save() # This should not overwrite anything but instead refresh data
# With datastore_wins set to False, the datastore loses concurrency conflicts
self.assertEqual(disk2.name, 'two', 'Data should not be overwritten')
self.assertEqual(disk.name, 'two', 'Data should be refreshed')
def test_datastoreraises(self):
"""
Validates the "datastore_wins" behavior in the usecase where it's supposed to raise
"""
disk = TestDisk()
disk.name = 'initial'
disk.save()
disk2 = TestDisk(disk.guid, datastore_wins=None)
disk.name = 'one'
disk.save()
disk2.name = 'two'
# with datastore_wins set to None, concurrency conflicts are raised
self.assertRaises(ConcurrencyException, disk2.save)
def test_volatileproperty(self):
"""
Validates the volatile behavior of dynamic properties
"""
disk = TestDisk()
disk.size = 1000000
value = disk.used_size
# Volatile properties should be stored for the correct amount of time
time.sleep(2)
self.assertEqual(disk.used_size, value, 'Value should still be from cache')
time.sleep(2)
self.assertEqual(disk.used_size, value, 'Value should still be from cache')
time.sleep(2)
# ... after which they should be reloaded from the backend
self.assertNotEqual(disk.used_size, value, 'Value should be different')
def test_primarykeyvalidation(self):
"""
Validates whether the passed in key (guid) of an object is validated
"""
self.assertRaises(ValueError, TestDisk, 'foo', None)
disk = TestDisk() # Should not raise
disk.name = 'disk'
disk.save()
_ = TestDisk(disk.guid) # Should not raise
def test_persistency(self):
"""
Validates whether the object is fetched from the correct storage backend
"""
disk = TestDisk()
disk.name = 'test'
disk.save()
# Right after a save, the cache is invalidated
disk2 = TestDisk(disk.guid)
self.assertFalse(disk2._metadata['cache'], 'Object should be retrieved from persistent backend')
# Subsequent calls will retrieve the object from cache
disk3 = TestDisk(disk.guid)
self.assertTrue(disk3._metadata['cache'], 'Object should be retrieved from cache')
# After the object expiry has passed, it will be retrieved from the backend again
DummyVolatileStore().delete(disk._key) # We clear the entry
disk4 = TestDisk(disk.guid)
self.assertFalse(disk4._metadata['cache'], 'Object should be retrieved from persistent backend')
def test_queries(self):
"""
Validates whether executing queries returns the expected results
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
for i in xrange(0, 20):
disk = TestDisk()
disk.name = 'test_{0}'.format(i)
disk.size = i
if i < 10:
disk.machine = machine
else:
disk.storage = machine
disk.save()
self.assertEqual(len(machine.disks), 10, 'query should find the added disks')
# pylint: disable=line-too-long
list_1 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('size', DataList.operator.EQUALS, 1)]}}).data # noqa
self.assertEqual(list_1, 1, 'list should contain int 1')
list_2 = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('size', DataList.operator.EQUALS, 1)]}}).data # noqa
found_object = Descriptor(TestDisk, list_2[0]).get_object(True)
self.assertEqual(found_object.name, 'test_1', 'list should contain the correct disk')
list_3 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('size', DataList.operator.GT, 3),
('size', DataList.operator.LT, 6)]}}).data # noqa
self.assertEqual(list_3, 2, 'list should contain int 2') # disk 4 and 5
list_4 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.OR,
'items': [('size', DataList.operator.LT, 3),
('size', DataList.operator.GT, 6)]}}).data # noqa
# at least disk 0, 1, 2, 7, 8, 9, 10-19
self.assertGreaterEqual(list_4, 16, 'list should contain >= 16')
list_5 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.guid', DataList.operator.EQUALS, machine.guid), # noqa
{'type': DataList.where_operator.OR,
'items': [('size', DataList.operator.LT, 3),
('size', DataList.operator.GT, 6)]}]}}).data # noqa
self.assertEqual(list_5, 6, 'list should contain int 6') # disk 0, 1, 2, 7, 8, 9
list_6 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('size', DataList.operator.LT, 3),
('size', DataList.operator.GT, 6)]}}).data # noqa
self.assertEqual(list_6, 0, 'list should contain int 0') # no disks
list_7 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.OR,
'items': [('machine.guid', DataList.operator.EQUALS, '123'), # noqa
('used_size', DataList.operator.EQUALS, -1),
{'type': DataList.where_operator.AND,
'items': [('size', DataList.operator.GT, 3),
('size', DataList.operator.LT, 6)]}]}}).data # noqa
self.assertEqual(list_7, 2, 'list should contain int 2') # disk 4 and 5
list_8 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine'), # noqa
('name', DataList.operator.EQUALS, 'test_3')]}}).data # noqa
self.assertEqual(list_8, 1, 'list should contain int 1') # disk 3
list_9 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('size', DataList.operator.GT, 3),
{'type': DataList.where_operator.AND,
'items': [('size', DataList.operator.LT, 6)]}]}}).data # noqa
self.assertEqual(list_9, 2, 'list should contain int 2') # disk 4 and 5
list_10 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.OR,
'items': [('size', DataList.operator.LT, 3),
{'type': DataList.where_operator.OR,
'items': [('size', DataList.operator.GT, 6)]}]}}).data # noqa
# at least disk 0, 1, 2, 7, 8, 9, 10-19
self.assertGreaterEqual(list_10, 16, 'list should contain >= 16')
list_11 = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('storage.name', DataList.operator.EQUALS, 'machine')]}}).data # noqa
self.assertEqual(list_11, 10, 'list should contain int 10') # disk 10-19
# pylint: enable=line-too-long
def test_invalidpropertyassignment(self):
"""
Validates whether the correct exception is raised when properties are assigned with a wrong
type
"""
disk = TestDisk()
disk.size = 100
with self.assertRaises(TypeError):
disk.machine = TestDisk()
def test_recursive(self):
"""
Validates the recursive save
"""
machine = TestMachine()
machine.name = 'original'
machine.save()
disks = []
for i in xrange(0, 10):
disk = TestDisk()
disk.name = 'test_{0}'.format(i)
if i % 2:
disk.machine = machine
else:
disk.machine = machine
self.assertEqual(disk.machine.name, 'original', 'child should be set')
disk.machine = None
self.assertIsNone(disk.machine, 'child should be cleared')
disks.append(disk)
disk.save()
counter = 1
for disk in machine.disks:
disk.size = counter
counter += 1
machine.save(recursive=True)
disk = TestDisk(machine.disks[0].guid)
self.assertEqual(disk.size, 1, 'lists should be saved recursively')
disk.machine.name = 'mtest'
disk.save(recursive=True)
machine2 = TestMachine(machine.guid)
self.assertEqual(machine2.disks[1].size, 2, 'lists should be saved recursively')
self.assertEqual(machine2.name, 'mtest', 'properties should be saved recursively')
def test_descriptors(self):
"""
Validates the correct behavior of the Descriptor
"""
with self.assertRaises(RuntimeError):
_ = Descriptor().descriptor
with self.assertRaises(RuntimeError):
_ = Descriptor().get_object()
def test_relationcache(self):
"""
Validates whether the relational properties are cached correctly, and whether
they are invalidated when required
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk1 = TestDisk()
disk1.name = 'disk1'
disk1.save()
disk2 = TestDisk()
disk2.name = 'disk2'
disk2.save()
disk3 = TestDisk()
disk3.name = 'disk3'
disk3.save()
self.assertEqual(len(machine.disks), 0, 'There should be no disks on the machine')
disk1.machine = machine
disk1.save()
self.assertEqual(len(machine.disks), 1, 'There should be 1 disks on the machine')
disk2.machine = machine
disk2.save()
self.assertEqual(len(machine.disks), 2, 'There should be 2 disks on the machine')
disk3.machine = machine
disk3.save()
self.assertEqual(len(machine.disks), 3, 'There should be 3 disks on the machine')
machine.disks[0].name = 'disk1_'
machine.disks[1].name = 'disk2_'
machine.disks[2].name = 'disk3_'
disk1.machine = None
disk1.save()
disk2.machine = None
disk2.save()
self.assertEqual(len(machine.disks), 1, 'There should be 1 disks on the machine')
def test_datalistactions(self):
"""
Validates all actions that can be executed against DataLists
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk1 = TestDisk()
disk1.name = 'disk1'
disk1.machine = machine
disk1.save()
disk2 = TestDisk()
disk2.name = 'disk2'
disk2.machine = machine
disk2.save()
disk3 = TestDisk()
disk3.name = 'disk3'
disk3.machine = machine
disk3.save()
self.assertEqual(machine.disks.count(disk1), 1, 'Disk should be available only once')
self.assertGreaterEqual(machine.disks.index(disk1), 0, 'We should retrieve an index')
machine.disks.sort()
guid = machine.disks[0].guid
machine.disks.reverse()
self.assertEqual(machine.disks[-1].guid, guid, 'Reverse and sort should work')
machine.disks.sort()
self.assertEqual(machine.disks[0].guid, guid, 'And the guid should be first again')
def test_listcache(self):
"""
Validates whether lists are cached and invalidated correctly
"""
keys = ['list_cache', None]
for key in keys:
disk0 = TestDisk()
disk0.name = 'disk 0'
disk0.save()
list_cache = DataList(key=key,
query={'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa
self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key))
self.assertEqual(list_cache.data, 0, 'List should find no entries (mode: {0})'.format(key))
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk1 = TestDisk()
disk1.name = 'disk 1'
disk1.machine = machine
disk1.save()
list_cache = DataList(key=key,
query={'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa
self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key))
self.assertEqual(list_cache.data, 1, 'List should find one entry (mode: {0})'.format(key))
list_cache = DataList(key=key,
query={'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa
self.assertTrue(list_cache.from_cache, 'List should be loaded from cache (mode: {0})'.format(key))
disk2 = TestDisk()
disk2.machine = machine
disk2.name = 'disk 2'
disk2.save()
list_cache = DataList(key=key,
query={'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa
self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key))
self.assertEqual(list_cache.data, 2, 'List should find two entries (mode: {0})'.format(key))
machine.name = 'x'
machine.save()
list_cache = DataList(key=key,
query={'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa
self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key))
self.assertEqual(list_cache.data, 0, 'List should have no matches (mode: {0})'.format(key))
def test_emptyquery(self):
"""
Validates whether a certain query returns an empty set
"""
amount = DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}).data # noqa
self.assertEqual(amount, 0, 'There should be no data')
def test_nofilterquery(self):
"""
Validates whether empty queries return the full resultset
"""
disk1 = TestDisk()
disk1.name = 'disk 1'
disk1.save()
disk2 = TestDisk()
disk2.name = 'disk 2'
disk2.save()
amount = DataList(key='some_list',
query={'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
self.assertEqual(amount, 2, 'There should be two disks ({0})'.format(amount))
disk3 = TestDisk()
disk3.name = 'disk 3'
disk3.save()
amount = DataList(key='some_list',
query={'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
self.assertEqual(amount, 3, 'There should be three disks ({0})'.format(amount))
def test_invalidqueries(self):
"""
Validates invalid queries
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk = TestDisk()
disk.name = 'disk'
disk.machine = machine
disk.save()
setattr(DataList.select, 'SOMETHING', 'SOMETHING')
with self.assertRaises(NotImplementedError):
DataList({'object': TestDisk,
'data': DataList.select.SOMETHING,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa
setattr(DataList.where_operator, 'SOMETHING', 'SOMETHING')
with self.assertRaises(NotImplementedError):
DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.SOMETHING,
'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa
setattr(DataList.operator, 'SOMETHING', 'SOMETHING')
with self.assertRaises(NotImplementedError):
DataList({'object': TestDisk,
'data': DataList.select.COUNT,
'query': {'type': DataList.where_operator.AND,
'items': [('machine.name', DataList.operator.SOMETHING, 'machine')]}}) # noqa
def test_clearedcache(self):
"""
Validates the correct behavior when the volatile cache is cleared
"""
disk = TestDisk()
disk.name = 'somedisk'
disk.save()
VolatileFactory.store.delete(disk._key)
disk2 = TestDisk(disk.guid)
self.assertEqual(disk2.name, 'somedisk', 'Disk should be fetched from persistent store')
def test_serialization(self):
"""
Validates whether serialization works as expected
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk = TestDisk()
disk.name = 'disk'
disk.machine = machine
disk.save()
dictionary = disk.serialize()
self.assertIn('name', dictionary, 'Serialized object should have correct properties')
self.assertEqual(dictionary['name'], 'disk', 'Serialized object should have correct name')
self.assertIn('machine_guid', dictionary, 'Serialized object should have correct depth')
self.assertEqual(dictionary['machine_guid'], machine.guid,
'Serialized object should have correct properties')
dictionary = disk.serialize(depth=1)
self.assertIn('machine', dictionary, 'Serialized object should have correct depth')
self.assertEqual(dictionary['machine']['name'], 'machine',
'Serialized object should have correct properties at all depths')
def test_primarykeys(self):
"""
Validates whether the primary keys are kept in sync
"""
disk = TestDisk()
disk.name = 'disk'
keys = DataList.get_pks(disk._namespace, disk._name)
self.assertEqual(len(keys), 0, 'There should be no primary keys ({0})'.format(len(keys)))
disk.save()
keys = DataList.get_pks(disk._namespace, disk._name)
self.assertEqual(len(keys), 1, 'There should be one primary key ({0})'.format(len(keys)))
disk.delete()
keys = DataList.get_pks(disk._namespace, disk._name)
self.assertEqual(len(keys), 0, 'There should be no primary keys ({0})'.format(len(keys)))
def test_reduceddatalist(self):
"""
Validates the reduced list
"""
disk = TestDisk()
disk.name = 'test'
disk.save()
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
datalist = DataObjectList(data, TestDisk)
self.assertEqual(len(datalist), 1, 'There should be only one item ({0})'.format(len(datalist)))
item = datalist.reduced[0]
with self.assertRaises(AttributeError):
print item.name
self.assertEqual(item.guid, disk.guid, 'The guid should be available')
def test_volatilemutex(self):
"""
Validates the volatile mutex
"""
mutex = VolatileMutex('test')
mutex.acquire()
mutex.acquire() # Should not raise errors
mutex.release()
mutex.release() # Should not raise errors
mutex._volatile.add(mutex.key(), 1, 10)
with self.assertRaises(RuntimeError):
mutex.acquire(wait=1)
mutex._volatile.delete(mutex.key())
mutex.acquire()
time.sleep(0.5)
mutex.release()
def test_typesafety(self):
"""
Validates typesafety checking on object properties
"""
disk = TestDisk()
disk.name = 'test'
disk.name = u'test'
disk.name = None
disk.size = 100
disk.size = 100.5
disk.order = 100
with self.assertRaises(TypeError):
disk.order = 100.5
with self.assertRaises(TypeError):
disk.__dict__['wrong_type_data'] = None
disk.wrong_type_data = 'string'
_ = disk.wrong_type
with self.assertRaises(TypeError):
disk.type = 'THREE'
disk.type = 'ONE'
def test_ownrelations(self):
"""
Validates whether relations to the object itself are working
"""
pdisk = TestDisk()
pdisk.name = 'parent'
pdisk.save()
cdisk1 = TestDisk()
cdisk1.name = 'child 1'
cdisk1.size = 100
cdisk1.parent = pdisk
cdisk1.save()
cdisk2 = TestDisk()
cdisk2.name = 'child 2'
cdisk2.size = 100
cdisk2.parent = pdisk
cdisk2.save()
self.assertEqual(len(pdisk.children), 2, 'There should be 2 children ({0})'.format(len(pdisk.children)))
self.assertEqual(cdisk1.parent.name, 'parent', 'Parent should be loaded correctly')
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('parent.name', DataList.operator.EQUALS, 'parent')]}}).data
datalist = DataObjectList(data, TestDisk)
self.assertEqual(len(datalist), 2, 'There should be two items ({0})'.format(len(datalist)))
cdisk2.parent = None
cdisk2.save()
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('parent.name', DataList.operator.EQUALS, 'parent')]}}).data
datalist = DataObjectList(data, TestDisk)
self.assertEqual(len(datalist), 1, 'There should be one item ({0})'.format(len(datalist)))
def test_copy(self):
"""
Validates whether the copy function works correctly
"""
machine = TestMachine()
machine.name = 'testmachine1'
machine.save()
disk1 = TestDisk()
disk1.name = 'test1'
disk1.size = 100
disk1.order = 1
disk1.type = 'ONE'
disk1.machine = machine
disk1.save()
disk2 = TestDisk()
disk2.copy(disk1)
self.assertEqual(disk2.name, 'test1', 'Properties should be copied')
self.assertEqual(disk2.size, 100, 'Properties should be copied')
self.assertEqual(disk2.order, 1, 'Properties should be copied')
self.assertEqual(disk2.type, 'ONE', 'Properties should be copied')
self.assertEqual(disk2.machine, None, 'Relations should not be copied')
disk3 = TestDisk()
disk3.copy(disk1, include_relations=True)
self.assertEqual(disk3.machine.name, 'testmachine1', 'Relations should be copied')
disk4 = TestDisk()
disk4.copy(disk1, include=['name'])
self.assertEqual(disk4.name, 'test1', 'Name should be copied')
self.assertEqual(disk4.size, 0, 'Size should not be copied')
self.assertEqual(disk4.machine, None, 'Relations should not be copied')
disk5 = TestDisk()
disk5.copy(disk1, exclude=['name'])
self.assertEqual(disk5.name, None, 'Name should not be copied')
self.assertEqual(disk5.size, 100, 'Size should be copied')
self.assertEqual(disk5.machine, None, 'Relations should not be copied')
def test_querydynamic(self):
"""
Validates that a query which filters on dynamic properties is never cached
"""
def get_disks():
return DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('used_size', DataList.operator.NOT_EQUALS, -1)]}})
disk1 = TestDisk()
disk1.name = 'disk 1'
disk1.size = 100
disk1.save()
disk2 = TestDisk()
disk2.name = 'disk 2'
disk2.size = 100
disk2.save()
query_result = get_disks()
self.assertEqual(len(query_result.data), 2, 'There should be 2 disks ({0})'.format(len(query_result.data)))
self.assertFalse(query_result.from_cache, 'Disk should not be loaded from cache')
query_result = get_disks()
self.assertFalse(query_result.from_cache, 'Disk should not be loaded from cache')
def test_delete_abandoning(self):
"""
Validates the abandoning behavior of the delete method
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk_1 = TestDisk()
disk_1.name = 'disk 1'
disk_1.machine = machine
disk_1.save()
disk_2 = TestDisk()
disk_2.name = 'disk 2'
disk_2.machine = machine
disk_2.save()
self.assertRaises(LinkedObjectException, machine.delete)
disk_3 = TestDisk(disk_1.guid)
self.assertIsNotNone(disk_3.machine, 'The machine should still be linked')
_ = machine.disks # Make sure we loaded the list
disk_2.delete()
machine.delete(abandon=True) # Should not raise due to disk_2 being deleted
disk_4 = TestDisk(disk_1.guid)
self.assertIsNone(disk_4.machine, 'The machine should be unlinked')
def test_save_deleted(self):
"""
Validates whether saving a previously deleted object raises
"""
disk = TestDisk()
disk.name = 'disk'
disk.save()
disk.delete()
self.assertRaises(ObjectNotFoundException, disk.save)  # Saving a previously deleted object should raise
def test_dol_advanced(self):
"""
Validates the DataObjectList advanced functions (indexer, sort)
"""
sizes = [7, 2, 0, 4, 6, 1, 5, 9, 3, 8]
guids = []
for i in xrange(0, 10):
disk = TestDisk()
disk.name = 'disk_{0}'.format(i)
disk.size = sizes[i]
disk.save()
guids.append(disk.guid)
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
disks = DataObjectList(data, TestDisk)
disks.sort()
guids.sort()
self.assertEqual(disks[0].guid, guids[0], 'Disks should be sorted on guid')
self.assertEqual(disks[4].guid, guids[4], 'Disks should be sorted on guid')
disks.sort(cmp=lambda a, b: a.size - b.size)
self.assertEqual(disks[0].size, 0, 'Disks should be sorted on size')
self.assertEqual(disks[4].size, 4, 'Disks should be sorted on size')
disks.sort(key=lambda a: a.name)
self.assertEqual(disks[0].name, 'disk_0', 'Disks should be sorted on name')
self.assertEqual(disks[4].name, 'disk_4', 'Disks should be sorted on name')
filtered = disks[1:4]
self.assertEqual(filtered[0].name, 'disk_1', 'Disks should be properly sliced')
self.assertEqual(filtered[2].name, 'disk_3', 'Disks should be properly sliced')
def test_itemchange_during_list_build(self):
"""
Validates whether changing, creating or deleting objects while a dependent list is
being built causes that list to be invalidated
"""
def inject_new(datalist_object):
"""
Creates a new object
"""
_ = datalist_object
disk_x = TestDisk()
disk_x.name = 'test'
disk_x.save()
def inject_delete(datalist_object):
"""
Deletes an object
"""
_ = datalist_object
disk_1.delete()
def inject_update(datalist_object):
"""
Updates an object
"""
_ = datalist_object
disk_2.name = 'x'
disk_2.save()
disk_z = None # Needs to be there
disk_1 = TestDisk()
disk_1.name = 'test'
disk_1.save()
disk_2 = TestDisk()
disk_2.name = 'test'
disk_2.save()
# Validates new object creation
DataList.test_hooks['post_query'] = inject_new
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('name', DataList.operator.EQUALS, 'test')]}}).data
disks = DataObjectList(data, TestDisk)
self.assertEqual(len(disks), 2, 'Two disks should be found ({0})'.format(len(disks)))
del DataList.test_hooks['post_query']
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('name', DataList.operator.EQUALS, 'test')]}}).data
disks = DataObjectList(data, TestDisk)
self.assertEqual(len(disks), 3, 'Three disks should be found ({0})'.format(len(disks)))
# Clear the list cache for the next test
VolatileFactory.store.delete('ovs_list_6ea1af78996c9eb24a92c968ccc5f16b16686a8134212ea562135046ba146db4')
# Validates object change
DataList.test_hooks['post_query'] = inject_update
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('name', DataList.operator.EQUALS, 'test')]}}).data
disks = DataObjectList(data, TestDisk)
self.assertEqual(len(disks), 3, 'Three disks should be found ({0})'.format(len(disks)))
del DataList.test_hooks['post_query']
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('name', DataList.operator.EQUALS, 'test')]}}).data
disks = DataObjectList(data, TestDisk)
self.assertEqual(len(disks), 2, 'Two disks should be found ({0})'.format(len(disks)))
# Clear the list cache for the next test
VolatileFactory.store.delete('ovs_list_6ea1af78996c9eb24a92c968ccc5f16b16686a8134212ea562135046ba146db4')
# Validates object deletion
DataList.test_hooks['post_query'] = inject_delete
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('name', DataList.operator.EQUALS, 'test')]}}).data
disks = DataObjectList(data, TestDisk)
self.assertEqual(len(disks), 2, 'Two disks should be found ({0})'.format(len(disks)))
del DataList.test_hooks['post_query']
iterated_list = [d for d in disks]
self.assertEqual(len(iterated_list), 1, 'One disk should be found ({0})'.format(len(iterated_list)))
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('name', DataList.operator.EQUALS, 'test')]}}).data
disks = DataObjectList(data, TestDisk)
self.assertEqual(len(disks), 1, 'One disk should be found ({0})'.format(len(disks)))
_ = disk_z # Ignore this object not being used
def test_guid_query(self):
"""
Validates whether queries can use the _guid fields
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk = TestDisk()
disk.name = 'test'
disk.machine = machine
disk.save()
data = DataList({'object': TestDisk,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': [('machine_guid', DataList.operator.EQUALS, machine.guid)]}}).data
disks = DataObjectList(data, TestDisk)
self.assertEqual(len(disks), 1, 'There should be one disk ({0})'.format(len(disks)))
def test_1_to_1(self):
"""
Validates whether 1-to-1 relations work correctly
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
self.assertIsNone(machine.one, 'The machine should not have a reverse disk relation')
self.assertIsNone(machine.one_guid, 'The machine should have an empty disk _guid property')
disk = TestDisk()
disk.name = 'test'
disk.one = machine
disk.save()
self.assertIsNotNone(machine.one, 'The machine should have a reverse disk relation')
self.assertEqual(machine.one.name, 'test', 'The reverse 1-to-1 relation should work')
self.assertEqual(disk.one.name, 'machine', 'The normal 1-to-1 relation should work')
self.assertEqual(machine.one_guid, disk.guid, 'The reverse disk should be the correct one')
with self.assertRaises(RuntimeError):
machine.one = disk
def test_auto_inheritance(self):
"""
Validates whether fetching a base hybrid will result in the extended object
"""
machine = TestMachine()
self.assertEqual(Descriptor(machine.__class__), Descriptor(TestEMachine), 'The fetched TestMachine should be a TestEMachine')
def test_relation_inheritance(self):
"""
Validates whether relations on inherited hybrids behave OK
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk = TestDisk()
disk.name = 'disk'
disk.machine = machine # Validates relation acceptance (accepts TestEMachine)
disk.save()
machine.the_disk = disk  # Validates whether _relations is built correctly
machine.save()
disk2 = TestDisk(disk.guid)
self.assertEqual(Descriptor(disk2.machine.__class__), Descriptor(TestEMachine), 'The machine should be a TestEMachine')
def test_extended_property(self):
"""
Validates whether an inherited object has all properties
"""
machine = TestEMachine()
machine.name = 'emachine'
machine.extended = 'ext'
machine.save()
machine2 = TestEMachine(machine.guid)
self.assertEqual(machine2.name, 'emachine', 'The name of the extended machine should be correct')
self.assertEqual(machine2.extended, 'ext', 'The extended property of the extended machine should be correct')
def test_extended_filter(self):
"""
Validates whether base and extended hybrids behave the same in lists
"""
machine1 = TestMachine()
machine1.name = 'basic'
machine1.save()
machine2 = TestEMachine()
machine2.name = 'extended'
machine2.save()
data = DataList({'object': TestMachine,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
datalist = DataObjectList(data, TestMachine)
self.assertEqual(len(datalist), 2, 'There should be two machines if searched for TestMachine ({0})'.format(len(datalist)))
data = DataList({'object': TestEMachine,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
datalist = DataObjectList(data, TestMachine)
self.assertEqual(len(datalist), 2, 'There should be two machines if searched for TestEMachine ({0})'.format(len(datalist)))
def test_mandatory_fields(self):
"""
Validates whether mandatory properties and relations work
"""
machine = TestMachine()
machine.extended = 'extended'
machine.name = 'machine'
machine.save()
disk = TestDisk()
# Modify relation to mandatory
[_ for _ in disk._relations if _.name == 'machine'][0].mandatory = True
# Continue test
disk.name = None
with self.assertRaises(MissingMandatoryFieldsException) as exception:
disk.save()
self.assertIn('name', exception.exception.message, 'Field name should be in exception message: {0}'.format(exception.exception.message))
self.assertIn('machine', exception.exception.message, 'Field machine should be in exception message: {0}'.format(exception.exception.message))
disk.name = 'disk'
disk.machine = machine
disk.save()
disk.description = 'test'
disk.storage = machine
disk.save()
# Restore relation
[_ for _ in disk._relations if _.name == 'machine'][0].mandatory = False
def test_saveorder(self):
"""
Validates whether the order of saving related objects doesn't matter
"""
machine1 = TestMachine()
machine1.name = 'machine'
disk1_1 = TestDisk()
disk1_1.name = 'disk1'
disk1_1.machine = machine1
disk1_1.save()
disk1_2 = TestDisk()
disk1_2.name = 'disk2'
disk1_2.machine = machine1
disk1_2.save()
machine1.save()
self.assertEqual(len(machine1.disks), 2, 'There should be two disks. {0}'.format(len(machine1.disks)))
machine2 = TestMachine()
machine2.name = 'machine'
machine2.save()
disk2_1 = TestDisk()
disk2_1.name = 'disk1'
disk2_1.machine = machine2
disk2_1.save()
disk2_2 = TestDisk()
disk2_2.name = 'disk2'
disk2_2.machine = machine2
disk2_2.save()
self.assertEqual(len(machine2.disks), 2, 'There should be two disks. {0}'.format(len(machine2.disks)))
def test_versioning(self):
"""
Validates whether the versioning system works
"""
machine = TestMachine()
machine.name = 'machine0'
machine.save()
self.assertEqual(machine._data['_version'], 1, 'Version should be 1, is {0}'.format(machine._data['_version']))
machine.save()
self.assertEqual(machine._data['_version'], 2, 'Version should be 2, is {0}'.format(machine._data['_version']))
machine_x = TestMachine(machine.guid)
machine_x.name = 'machine1'
machine_x.save()
self.assertTrue(machine.updated_on_datastore(), 'Machine is updated on datastore')
machine.name = 'machine2'
machine.save()
self.assertEqual(machine._data['_version'], 4, 'Version should be 4, is {0}'.format(machine._data['_version']))
self.assertFalse(machine.updated_on_datastore(), 'Machine is not updated on datastore')
def test_outdated_listobjects(self):
"""
Validates whether elements in a (cached) list are reloaded if they are changed externally
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
disk1 = TestDisk()
disk1.name = 'disk1'
disk1.machine = machine
disk1.save()
disk2 = TestDisk()
disk2.name = 'disk2'
disk2.machine = machine
disk2.save()
self.assertListEqual(['disk1', 'disk2'], sorted([disk.name for disk in machine.disks]), 'Names should be disk1 and disk2')
disk2.name = 'disk_'
self.assertListEqual(['disk1', 'disk2'], sorted([disk.name for disk in machine.disks]), 'Names should still be disk1 and disk2')
disk2.save()
self.assertListEqual(['disk1', 'disk_'], sorted([disk.name for disk in machine.disks]), 'Names should be disk1 and disk_')
def test_invalidonetoone(self):
"""
Validates that an exception is raised when a one-to-one relation is used as a one-to-many
"""
machine = TestMachine()
machine.name = 'machine'
machine.save()
self.assertIsNone(machine.one, 'There should not be any disk(s)')
disk1 = TestDisk()
disk1.name = 'disk1'
disk1.one = machine
disk1.save()
self.assertEqual(machine.one, disk1, 'The correct disk should be returned')
disk2 = TestDisk()
disk2.name = 'disk2'
disk2.one = machine
disk2.save()
with self.assertRaises(InvalidRelationException):
_ = machine.one
if __name__ == '__main__':
import unittest
suite = unittest.TestLoader().loadTestsFromTestCase(Basic)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# ===========================================================================
import swap
import subprocess,sys,os,time
import numpy as np
#from subject import Ntrajectory
# ======================================================================
"""
NAME
logging.py
PURPOSE
Bits and pieces to help make a nice output log.
COMMENTS
Based on https://github.com/drphilmarshall/Pangloss/blob/master/pangloss/miscellaneous.py
FUNCTIONS
BUGS
AUTHORS
This file is part of the Space Warps project, and is distributed
under the MIT license by the Space Warps Science Team.
http://spacewarps.org/
HISTORY
2013-04-17 Started: Marshall (Oxford)
"""
# =========================================================================
SW_dashedline = 'SWAP: --------------------------------------------------------------------------'
SW_hello = ' SWAP: the Space Warps Analysis Pipeline '
ML_dashedline = 'ML: --------------------------------------------------------------------------'
ML_hello = ' ML: the Machine Learning component of Galaxy Zoo Express '
doubledashedline = '================================================================================'
helloswitch = ' SWITCH: the Space Warps Retirement Plan '
# ======================================================================
# Write a PDF report, using latex:
def write_report(pars,bureau,sample):
tex = pars['dir']+'/'+pars['trunk']+'_report.tex'
print "SWAP: writing report in "+tex
# Get started:
F = open(tex,"w")
swap.write_report_preamble(F)
# Top left panel holds a summary of numbers.
F.write('\\begin{minipage}{0.42\linewidth}\n')
title = pars['survey'].replace('_',',')
F.write('{\LARGE %s}\\newline\n' % title)
F.write('\medskip\n\n')
# First, just count things:
sample.take_stock()
bureau.collect_probabilities()
Nmade = np.sum(bureau.Ntotal)
Nused = np.sum(sample.exposure['sim'])+np.sum(sample.exposure['dud'])+np.sum(sample.exposure['test'])
Nc = len(bureau.member)
# Ns = len(sample.member)
# assert (Ns == sample.N)
Ns = sample.Ns
Ntl = len(sample.probabilities['sim'])
assert (Ntl == sample.Ntl)
Ntd = len(sample.probabilities['dud'])
assert (Ntd == sample.Ntd)
F.write('\\begin{tabular}{|p{0.65\linewidth}p{0.2\linewidth}|}\n')
F.write('\hline\n')
F.write('Number of classifications: & %d \\\\ \n' % Nmade )
F.write('Number of class$^{\\rm n}$s used: & %d \\\\ \n' % Nused )
F.write('Number of classifiers: & %d \\\\ \n' % Nc )
F.write('Number of test subjects: & %d \\\\ \n' % Ns )
F.write('Number of sims: & %d \\\\ \n' % Ntl )
F.write('Number of duds: & %d \\\\ \n' % Ntd )
F.write('\hline\n')
F.write('\end{tabular}\n')
# Now, what has the crowd achieved?
Nc_per_classifier = np.average(bureau.Ntest)
Nc_per_subject = np.average(sample.exposure['test'])
Ns_retired = sample.Ns_retired
Nc_per_retirement = np.average(sample.retirement_ages)
Ns_rejected = sample.Ns_rejected
Ns_detected = sample.Ns_detected
F.write('\\begin{tabular}{|p{0.65\linewidth}p{0.2\linewidth}|}\n')
F.write('\hline\n')
F.write('Mean test class$^{\\rm n}$s/classifier: & %.1f \\\\ \n' % Nc_per_classifier )
F.write('Mean class$^{\\rm n}$s/test subject: & %.1f \\\\ \n' % Nc_per_subject )
F.write('Test subject retirements: & %d \\\\ \n' % Ns_retired )
F.write('Mean class$^{\\rm n}$s/retirement: & %.1f \\\\ \n' % Nc_per_retirement )
F.write('Test subject rejections: & %d \\\\ \n' % Ns_rejected )
F.write('Test subject identifications: & %d \\\\ \n' % Ns_detected )
F.write('\hline\n')
F.write('\end{tabular}\n')
# How complete/pure is the sample likely to be, based on the
# training set? First, completeness - lenses out over lenses in:
C_LENS = 100.0*sample.Ntl_detected/(sample.Ntl + (sample.Ntl == 0))
C_NOT = 100.0*sample.Ntd_rejected/(sample.Ntd + (sample.Ntd == 0))
# Now purity - lenses out over all output, accounting for population:
Npool = 1.0/swap.prior
P_LENS = 100.0*(1.0*C_LENS/100.0 + (1.0-C_NOT/100.0)*(Npool - 1))/(Npool)
# False positive contamination - detected duds as fraction of
# total detections:
# FP = 100.0*sample.Ntd_detected/(sample.Nt_detected + (sample.Nt_detected == 0))
FP = 100.0 - P_LENS
# Lenses lost as false negatives - rejected sims as fraction of
# total number of input sims:
FN = 100.0*sample.Ntl_rejected/(sample.Ntl + (sample.Ntl == 0))
F.write('\\begin{tabular}{|p{0.65\linewidth}p{0.2\linewidth}|}\n')
F.write('\hline\n')
F.write('Lens completeness: & %.1f%s \\\\ \n' % (C_LENS,'\%') )
F.write('Lens purity: & %.1f%s \\\\ \n' % (P_LENS,'\%') )
# F.write('Non-lens completeness: & %.1f%s \\\\ \n' % (C_NOT,'\%') )
# F.write('Non-lens purity: & %.1f%s \\\\ \n' % (P_NOT,'\%') )
F.write('FP contamination: & %.1f%s \\\\ \n' % (FP,'\%') )
F.write('Lenses missed (FN rate): & %.1f%s \\\\ \n' % (FN,'\%') )
F.write('\hline\n')
F.write('\end{tabular}\n')
F.write('\end{minipage}\hfill\n')
# Other panels contain figures:
swap.add_report_figures(F,pars)
# Finish off the texfile:
swap.write_report_ending(F)
F.close()
# Compile the pdf:
swap.compile_report(tex,pars)
return
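# ----------------------------------------------------------------------
# Hedged sketch (not part of the original pipeline): the completeness,
# purity and false-positive/negative arithmetic used in write_report,
# factored into a standalone helper so it can be exercised with plain
# integers. Argument names mirror the sample attributes used above; the
# expressions are copied verbatim from the in-line code.
def sketch_report_fractions(Ntl, Ntl_detected, Ntl_rejected, Ntd, Ntd_rejected, prior):
    # The "(N == 0)" terms guard against division by zero, as above.
    C_LENS = 100.0*Ntl_detected/(Ntl + (Ntl == 0))
    C_NOT = 100.0*Ntd_rejected/(Ntd + (Ntd == 0))
    Npool = 1.0/prior
    P_LENS = 100.0*(1.0*C_LENS/100.0 + (1.0-C_NOT/100.0)*(Npool - 1))/Npool
    FP = 100.0 - P_LENS
    FN = 100.0*Ntl_rejected/(Ntl + (Ntl == 0))
    return C_LENS, C_NOT, P_LENS, FP, FN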
# ----------------------------------------------------------------------
def write_report_preamble(F):
F.write('\documentclass[letterpaper,12pt]{article}\n')
F.write('\usepackage{helvet,mathpple}\n')
F.write('\usepackage{graphicx}\n')
F.write('\\renewcommand{\\familydefault}{\sfdefault}\n')
F.write('\\renewcommand{\\arraystretch}{1.5}\n')
F.write('\setlength{\oddsidemargin}{-0.65in}\n')
F.write('\setlength{\\textwidth}{7.75in}\n')
F.write('\setlength{\\topmargin}{-1.5in}\n')
F.write('\setlength{\\textheight}{10.5in}\n')
F.write('\pagestyle{empty}\n')
F.write('\\begin{document}\n')
return
# ----------------------------------------------------------------------
def add_report_figures(F,pars):
# Top Right: Subject trajectories:
F.write('\\begin{minipage}{0.56\linewidth}\n')
F.write('\includegraphics[width=\linewidth]{%s}\n' % pars['trajectoriesplot'])
F.write('\end{minipage}\n\n')
F.write('\\vspace{-1\\baselineskip}\n')
F.write('\\begin{minipage}{\linewidth}\n')
# Bottom Left: Classifier probabilities:
F.write('\\begin{minipage}{0.48\linewidth}\n')
F.write('\includegraphics[width=\linewidth]{%s}\n' % pars['probabilitiesplot'])
F.write('\end{minipage}\n')
# Bottom Right: Classifier histories:
F.write('\\begin{minipage}{0.48\linewidth}\n')
F.write('\includegraphics[width=\linewidth]{%s}\n' % pars['historiesplot'])
F.write('\end{minipage}\n')
F.write('\end{minipage}\n')
return
# ----------------------------------------------------------------------
def write_report_ending(F):
F.write('\end{document}\n')
return
# ----------------------------------------------------------------------
def compile_report(tex,pars):
stem = tex.split('.')[0]
pdf = stem+'.pdf'
# Remove PDF file:
swap.rm(pdf)
# Keep a record of what happens:
log = stem+'.texlog'
L = open(log,"w")
# Run pdflatex:
P = subprocess.Popen(["pdflatex",tex],cwd=pars['dir'],stdout=L,stderr=L)
# Wait for it to finish:
for t in range(10):
time.sleep(1)
if P.poll() is not None: break  # pdflatex finished - stop waiting
# If pdflatex has not finished - kill it.
if P.poll() is None: P.terminate()
L.close()
# Check the PDF got made:
if os.path.exists(pdf):
print "SWAP: report compiled as "+pdf
else:
print "SWAP: pdflatex failed, here's the end of the report:"
subprocess.call(["tail",log])
print "SWAP: this report is stored in ",log
print "SWAP: exiting."
sys.exit()
return
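# ----------------------------------------------------------------------
# Hedged sketch (not in the original code): the bounded-wait pattern used in
# compile_report, pulled out as a reusable helper. It relies only on the
# subprocess/time modules already imported above.
def sketch_wait_or_kill(process, timeout_seconds=10):
    # Poll the subprocess once per second, up to timeout_seconds.
    for _ in range(timeout_seconds):
        time.sleep(1)
        if process.poll() is not None:
            return True   # finished within the timeout
    process.terminate()   # still running after the timeout - kill it
    return False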
# ----------------------------------------------------------------------
def set_cookie(go, cookie_name='cookie'):
F = open('.swap.%s'%cookie_name,'w')
if go:
F.write('running')
else:
F.write('stopped')
F.close()
return
#=======================================================================
if __name__ == '__main__':
pass
#=======================================================================
|
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.core.files.storage import default_storage
from uw_canvas import Canvas
from uw_canvas.accounts import Accounts
from uw_canvas.admins import Admins
from uw_canvas.courses import Courses
from uw_canvas.sections import Sections
from uw_canvas.enrollments import Enrollments
from uw_canvas.reports import Reports
from uw_canvas.roles import Roles
from uw_canvas.users import Users
from uw_canvas.terms import Terms
from uw_canvas.external_tools import ExternalTools
from uw_canvas.sis_import import SISImport, CSV_FILES
from uw_canvas.models import CanvasEnrollment, SISImport as SISImportModel
from restclients_core.exceptions import DataFailureException
from sis_provisioner.dao.course import (
valid_academic_course_sis_id, valid_academic_section_sis_id,
group_section_sis_id)
from sis_provisioner.exceptions import CoursePolicyException
from urllib3.exceptions import SSLError
from logging import getLogger
from csv import reader
from io import BytesIO
import zipfile
import json
logger = getLogger(__name__)
AUDITOR_ENROLLMENT = 'Auditor'
ENROLLMENT_ACTIVE = CanvasEnrollment.STATUS_ACTIVE
ENROLLMENT_INACTIVE = CanvasEnrollment.STATUS_INACTIVE
ENROLLMENT_DELETED = CanvasEnrollment.STATUS_DELETED
def valid_canvas_id(canvas_id):
return Canvas().valid_canvas_id(canvas_id)
def get_account_by_id(account_id):
return Accounts().get_account(account_id)
def get_account_by_sis_id(sis_account_id):
return Accounts().get_account_by_sis_id(sis_account_id)
def get_sub_accounts(account_id):
return Accounts(per_page=100).get_sub_accounts(account_id)
def get_all_sub_accounts(account_id):
return Accounts(per_page=100).get_all_sub_accounts(account_id)
def update_account_sis_id(account_id, sis_account_id):
return Accounts().update_sis_id(account_id, sis_account_id)
def get_external_tools(account_id):
return ExternalTools(per_page=100).get_external_tools_in_account(
account_id)
def create_external_tool(account_id, config):
if 'id' in config:
del config['id']
return ExternalTools().create_external_tool_in_account(account_id, config)
def update_external_tool(account_id, external_tool_id, config):
return ExternalTools().update_external_tool_in_account(
account_id, external_tool_id, config)
def delete_external_tool(account_id, external_tool_id):
return ExternalTools().delete_external_tool_in_account(
account_id, external_tool_id)
def get_admins(account_id):
return Admins(per_page=100).get_admins(account_id)
def delete_admin(account_id, user_id, role):
try:
ret = Admins().delete_admin(account_id, user_id, role)
except DataFailureException as err:
if err.status == 404: # Non-personal regid?
return False
raise
return ret
def get_course_roles_in_account(account_sis_id):
if account_sis_id.startswith('uwcourse:uweo'):
account_id = getattr(settings, 'CONTINUUM_CANVAS_ACCOUNT_ID')
else:
account_id = getattr(settings, 'RESTCLIENTS_CANVAS_ACCOUNT_ID')
return Roles().get_effective_course_roles_in_account(account_id)
def get_account_role_data(account_id):
role_data = []
roles = Roles(per_page=100).get_roles_in_account(account_id)
for role in sorted(roles, key=lambda r: r.role_id):
role_data.append(role.json_data())
return json.dumps(role_data, sort_keys=True)
def get_user_by_sis_id(sis_user_id, params={}):
return Users().get_user_by_sis_id(sis_user_id, params=params)
def get_all_users_for_person(person):
canvas = Users()
all_uwregids = [person.uwregid]
all_uwregids.extend(person.prior_uwregids)
params = {'include': 'last_login'}
all_users = []
for uwregid in all_uwregids:
try:
all_users.append(canvas.get_user_by_sis_id(uwregid, params=params))
except DataFailureException as ex:
if ex.status != 404:
raise
return all_users
def merge_all_users_for_person(person):
destination_user = None
users_to_merge = []
for user in get_all_users_for_person(person):
if user.login_id == person.uwnetid: # Current login_id
destination_user = user
else:
users_to_merge.append(user)
if destination_user and len(users_to_merge):
canvas = Users()
for user in users_to_merge:
canvas.merge_users(user, destination_user)
logger.info('Merged user {} into {}'.format(
user.user_id, destination_user.user_id))
return destination_user
def create_user(person):
return Users().create_user(person)
def get_term_by_sis_id(term_sis_id):
return Terms().get_term_by_sis_id(term_sis_id)
def get_course_by_id(course_id):
return Courses().get_course(course_id)
def get_course_by_sis_id(course_sis_id):
return Courses().get_course_by_sis_id(course_sis_id)
def update_course_sis_id(course_id, course_sis_id):
return Courses().update_sis_id(course_id, course_sis_id)
def update_term_overrides(term_sis_id, override_dates):
overrides = {}
for role in override_dates.keys():
overrides[role] = {
'start_at': override_dates[role][0],
'end_at': override_dates[role][1]
}
return Terms().update_term_overrides(term_sis_id, overrides=overrides)
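# Hedged usage sketch (not part of the original module): update_term_overrides
# expects override_dates keyed by role, each value being a (start_at, end_at)
# pair. The term SIS ID, role name and dates below are illustrative only.
#
#   update_term_overrides('2024-autumn', {
#       'StudentEnrollment': ('2024-09-25T08:00:00Z', '2024-12-13T08:00:00Z'),
#   })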
def get_section_by_sis_id(section_sis_id):
return Sections().get_section_by_sis_id(section_sis_id)
def get_sis_sections_for_course(course_sis_id):
sis_sections = []
try:
for section in Sections().get_sections_in_course_by_sis_id(
course_sis_id):
try:
valid_academic_section_sis_id(section.sis_section_id)
sis_sections.append(section)
except CoursePolicyException:
pass
except DataFailureException as err:
if err.status != 404:
raise
return sis_sections
def get_sis_import_role(role):
return CanvasEnrollment.sis_import_role(role)
def get_student_sis_import_role():
return get_sis_import_role(CanvasEnrollment.STUDENT)
def get_instructor_sis_import_role():
return get_sis_import_role(CanvasEnrollment.TEACHER)
def valid_enrollment_status(status):
return (status == ENROLLMENT_ACTIVE or status == ENROLLMENT_INACTIVE or
status == ENROLLMENT_DELETED)
def enrollment_status_from_registration(registration):
request_status = registration.request_status.lower()
if (registration.is_active or request_status == 'added to standby' or
request_status == 'pending added to class'):
return ENROLLMENT_ACTIVE
if registration.request_date is None:
logger.info('Missing request_date: {} {}'.format(
registration.section.section_label(), registration.person.uwregid))
return ENROLLMENT_DELETED
if (registration.request_date > registration.section.term.census_day):
return ENROLLMENT_INACTIVE
else:
return ENROLLMENT_DELETED
def get_enrollments_for_course_by_sis_id(course_sis_id):
canvas = Enrollments(per_page=200)
enrollments = []
for enrollment in canvas.get_enrollments_for_course_by_sis_id(
course_sis_id, {'state': [ENROLLMENT_ACTIVE]}):
# Ignore the Canvas preview 'user'
if 'StudentViewEnrollment' != enrollment.role:
enrollments.append(enrollment)
return enrollments
def get_sis_enrollments_for_user_in_course(user_sis_id, course_sis_id):
canvas = Enrollments()
enrollments = []
for enrollment in canvas.get_enrollments_for_course_by_sis_id(
course_sis_id, {'user_id': canvas.sis_user_id(user_sis_id)}):
try:
valid_academic_section_sis_id(enrollment.sis_section_id)
enrollments.append(enrollment)
except CoursePolicyException:
continue
return enrollments
def get_active_sis_enrollments_for_user(user_sis_id, roles=[]):
canvas = Enrollments(per_page=100)
params = {'state': [ENROLLMENT_ACTIVE]}
if len(roles):
params['type'] = roles
enrollments = []
for enrollment in canvas.get_enrollments_for_regid(user_sis_id, params):
try:
valid_academic_section_sis_id(enrollment.sis_section_id)
enrollments.append(enrollment)
except CoursePolicyException:
continue
return enrollments
def get_active_courses_for_term(term, account_id=None):
if account_id is None:
account_id = getattr(settings, 'RESTCLIENTS_CANVAS_ACCOUNT_ID', None)
canvas_term = get_term_by_sis_id(term.canvas_sis_id())
reports = Reports()
# Canvas report of "unused" courses for the term
unused_course_report = reports.create_unused_courses_report(
account_id, canvas_term.term_id)
unused_courses = {}
for row in reader(reports.get_report_data(unused_course_report)):
try:
sis_course_id = row[1]
valid_academic_course_sis_id(sis_course_id)
unused_courses[sis_course_id] = True
except (IndexError, CoursePolicyException):
pass
# Canvas report of all courses for the term
all_course_report = reports.create_course_provisioning_report(
account_id, canvas_term.term_id)
active_courses = []
for row in reader(reports.get_report_data(all_course_report)):
try:
sis_course_id = row[1]
valid_academic_course_sis_id(sis_course_id)
if sis_course_id not in unused_courses:
active_courses.append(sis_course_id)
except (IndexError, CoursePolicyException):
pass
reports.delete_report(unused_course_report)
reports.delete_report(all_course_report)
return active_courses
def get_unused_course_report_data(term_sis_id):
term = Terms().get_term_by_sis_id(term_sis_id)
account_id = getattr(settings, 'RESTCLIENTS_CANVAS_ACCOUNT_ID', None)
reports = Reports()
unused_course_report = reports.create_unused_courses_report(
account_id, term_id=term.term_id)
report_data = reports.get_report_data(unused_course_report)
reports.delete_report(unused_course_report)
return report_data
def sis_import_by_path(csv_path, override_sis_stickiness=False):
dirs, files = default_storage.listdir(csv_path)
archive = BytesIO()
zip_file = zipfile.ZipFile(archive, 'w')
for filename in CSV_FILES:
if filename in files:
filepath = csv_path + '/' + filename
with default_storage.open(filepath, mode='r') as csv:
zip_file.writestr(filename, csv.read(), zipfile.ZIP_DEFLATED)
zip_file.close()
archive.seek(0)
params = {}
if override_sis_stickiness:
params['override_sis_stickiness'] = '1'
return SISImport().import_archive(archive, params=params)
def get_sis_import_status(import_id):
return SISImport().get_import_status(
SISImportModel(import_id=str(import_id)))
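# Hedged usage sketch (not part of the original module): how the SIS import
# helpers above are typically chained. 'csv_path' is a hypothetical directory
# in default_storage containing the CSV_FILES; reading 'import_id' off the
# object returned by sis_import_by_path is an assumption about uw_canvas.
def _example_sis_import_flow(csv_path):
    sis_import = sis_import_by_path(csv_path, override_sis_stickiness=True)
    return get_sis_import_status(sis_import.import_id)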
|
|
'''
.. autoclass:: TestLoader
:members:
:member-order: bysource
'''
import os
import re
import sys
import unittest
from importlib import import_module
from .utils import LOGGER
__all__ = ['TestLoader']
def issubclass_safe(cls, base_cls):
try:
return issubclass(cls, base_cls)
except TypeError:
return False
class TestLoader(object):
'''Classes used by the :class:`.TestSuite` to aggregate tests
from a list of paths.
The way it works is simple: you give a *root* directory and a list
of submodules where to look for tests.
:parameter root: root path passed by the :class:`.TestSuite`.
:parameter modules: list (or tuple) of entries where to look for tests.
Check :ref:`loading test documentation <apps-test-loading>` for
more information.
:parameter runner: The :class:`.TestRunner` passed by the test suite.
'''
def __init__(self, root, modules, runner, logger=None):
self.runner = runner
self.logger = logger or LOGGER
self.root = root
self.modules = []
for mod in modules:
if isinstance(mod, str):
mod = (mod, None, None)
if len(mod) < 3:
mod = tuple(mod) + (None,) * (3 - len(mod))
self.modules.append(mod)
def __repr__(self):
return self.root
__str__ = __repr__
def alltags(self, tag):
bits = tag.split('.')
tag, rest = bits[0], bits[1:]
yield tag
for b in rest:
tag += '.' + b
yield tag
def checktag(self, tag, import_tags, exclude_tags):
'''Return 2 if ``tag`` fully matches ``import_tags`` (or no tags are given),
1 for a partial match, and 0 if ``tag`` is excluded or does not match.'''
if exclude_tags:
alltags = list(self.alltags(tag))
for exclude_tag in exclude_tags:
for bit in alltags:
if bit == exclude_tag:
return 0
if import_tags:
c = 0
alltags = list(self.alltags(tag))
for import_tag in import_tags:
allitags = list(self.alltags(import_tag))
for bit in alltags:
if bit == import_tag:
return 2
elif bit in allitags:
c = 1
return c
else:
return 2
def testclasses(self, tags=None, exclude_tags=None):
pt = ', '.join(('"%s"' % t for t in tags)) if tags else 'all'
ex = ((' excluding %s' % ', '.join(('"%s"' % t for t in exclude_tags)))
if exclude_tags else '')
self.logger.info('Load test classes for %s %s', pt, ex)
for tag, mod in self.testmodules(tags, exclude_tags):
if tags:
skip = True
for bit in self.alltags(tag):
if bit in tags:
skip = False
break
if skip:
continue
for name in dir(mod):
obj = getattr(mod, name)
if issubclass_safe(obj, unittest.TestCase):
yield tag, obj
def testmodules(self, tags=None, exclude_tags=None):
'''Generator of ``tag``, ``modules`` pairs.
:parameter tags: optional list of tags to include; if not provided, all tags
will be included.
:parameter exclude_tags: optional list of tags to exclude. If not provided no
tags will be excluded.'''
d = dict(self._testmodules(tags, exclude_tags))
return [(k, d[k]) for k in sorted(d)]
def _testmodules(self, tags, exclude_tags):
for name, pattern, tag in self.modules:
names = name.split('.') if name else ()
absolute_path = pattern_path = os.path.join(self.root, *names)
if pattern == '*':
pattern = None
if pattern:
pattern_path = os.path.join(pattern_path, pattern)
pattern = re.compile(pattern.replace('*', '(.*)'))
self.logger.debug('Loading from "%s"', pattern_path)
if os.path.isdir(absolute_path):
pathbase = os.path.dirname(absolute_path)
if pathbase not in sys.path:
sys.path.append(pathbase)
name = names[-1]
stags = (tag,) if tag else ()
for tag, mod in self.get_tests(absolute_path, name, pattern,
import_tags=tags, tags=stags,
exclude_tags=exclude_tags):
yield tag, mod
elif os.path.isfile(absolute_path + '.py'):
include, ntag = self.match(pattern,
os.path.basename(absolute_path))
if include:
tag = ntag or tag or name
mod = self.import_module(name)
if mod:
yield tag[0] if isinstance(tag, tuple) else tag, mod
else:
raise ValueError('%s cannot be found in %s directory.'
% (name, self.root))
def get_tests(self, path, dotted_path, pattern, import_tags=None,
tags=(), exclude_tags=None, parent=None):
'''Collect python modules for testing and return a generator of
tag,module pairs.
:parameter path: directory path where to search. Files starting with ``_``
or ``.`` are excluded from the search, as well as non-python files.
:parameter dotted_path: the dotted python path equivalent of ``path``.
:parameter parent: the parent module for the current one. This parameter
is passed by this function recursively.'''
for mod_name in os.listdir(path):
if mod_name.startswith('_') or mod_name.startswith('.'):
continue
mod_path = os.path.join(path, mod_name)
is_file = os.path.isfile(mod_path)
if is_file:
if mod_name.endswith('.py'):
mod_name = mod_name.split('.')[0]
else:
continue
include, addtag = self.match(pattern, mod_name)
if not include and is_file: # does not match and is a file, skip.
continue
elif include and not is_file and pattern:
# All modules under this directory will be included
# regardless of pattern
pattern = None
# module dotted path
if dotted_path:
mod_dotted_path = '%s.%s' % (dotted_path, mod_name)
else:
tags = (mod_name,)
mod_dotted_path = mod_name
#
module = self.import_module(mod_dotted_path, mod_path, parent)
if not module:
continue
ctags = tags + addtag
tag = '.'.join(ctags)
c = self.checktag(tag, import_tags, exclude_tags)
if not c:
continue
if is_file:
yield tag, module
else:
counter = 0
# Recursively import modules
for ctag, mod in self.get_tests(mod_path, mod_dotted_path,
pattern, import_tags, ctags,
exclude_tags, parent=module):
counter += 1
yield ctag, mod
# If more than one submodule, yield this tag too
if pattern:
if counter > 1:
yield tag, module
elif c == 2:
yield tag, module
def import_module(self, name, path=None, parent=None):
imp = True
if path and os.path.isdir(path):
imp = False
# import only if it has a __init__.py file
for sname in os.listdir(path):
if sname == '__init__.py':
imp = True
break
if imp:
try:
mod = import_module(name)
if getattr(mod, '__test__', True):
return self.runner.import_module(mod, parent)
except ImportError:
self.logger.error('Failed to import module %s. Skipping.',
name, exc_info=True)
self.logger.debug('Full python path:\n%s', '\n'.join(sys.path))
except Exception:
self.logger.critical('Failed to import module %s. Skipping.',
name, exc_info=True)
def match(self, pattern, name):
if pattern:
p = pattern.search(name)
if p:
return True, p.groups(0)
else:
return False, (name,)
else:
return True, (name,)
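# Hedged usage sketch (not part of pulsar): constructing a TestLoader with a
# minimal stand-in runner. The real runner is supplied by the TestSuite; the
# stub below only satisfies the runner.import_module() call used above, and
# the module entries are illustrative.
class _StubRunner(object):
    def import_module(self, mod, parent=None):
        return mod

def _example_collect(root):
    # 'tests' is a plain entry; the tuple form is (name, pattern, tag).
    loader = TestLoader(root, ['tests', ('example', 'test_*', None)],
                        _StubRunner())
    return list(loader.testclasses())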
|
|
# -*- coding: UTF8 -*-
# ***** BEGIN LICENSE BLOCK *****
# Sconspiracy - Copyright (C) IRCAD, 2004-2010.
# Distributed under the terms of the BSD Licence as
# published by the Open Source Initiative.
# ****** END LICENSE BLOCK ******
import os
import racy
from racy.rutils import memoize
def must_be_overloaded(func):
def new_func(self, *a,**kw):
msg = "{0.__name__} of {1.__class__} is not implemented"
raise racy.RacyPluginError, msg.format(func, self)
return new_func
@racy.no_undef_attr_read
@racy.no_undef_attr_write
class Plugin(object):
name = ""
options = {}
allowed_values = {}
commandline_opts = []
commandline_prj_opts = []
commandline_prj_args = []
descriptions_opts = {}
additive = False
replacement = False
env_addon = False
#----------------------------------------
def init(self):
return False
#----------------------------------------
def has_additive(self, prj):
return False
@must_be_overloaded
def get_additive(self, prj):
return []
#----------------------------------------
def has_replacement(self, prj):
return False
@must_be_overloaded
def get_replacement(self, prj):
return []
#----------------------------------------
def has_env_addon(self, env):
return False
@must_be_overloaded
def get_env_addon(self, env):
return []
#----------------------------------------
def _apply_additive(self, prj):
res = self.has_additive(prj)
if res:
return self.get_additive(prj)
return []
def _apply_replacement(self, prj):
res = self.has_replacement(prj)
if res:
return self.get_replacement(prj)
return []
def _apply_env_addon(self, prj):
res = self.has_env_addon(prj)
if res:
return self.get_env_addon(prj)
return []
def _load(self):
def list_extend_uniq(src,dst):
"""Extend dst with the elements of src, checking for duplicates.
Returns the values from src that were already present in dst.
"""
res = []
for el in src:
if el not in dst: dst.append(el)
else: res.append(el)
return res
def already_defined_exception(defs, attr):
msg = ("Plugin '{plug.name}' is redefining <Plugin.{attr}>"
" existing values : {defs}")
msg = msg.format(plug=self, defs=defs, attr=attr)
raise racy.RacyPluginError, msg
def register_list(src_name, dst):
src = getattr(self, src_name)
defined = list_extend_uniq( src, dst )
if defined:
already_defined_exception(defined, src_name)
def register_dict(src_name, dst):
src = getattr(self, src_name)
defined = list_extend_uniq( src.keys(), dst.keys())
if defined:
already_defined_exception(defined, src_name)
dst.update(src)
import racy.renv.configs.default as default
register_dict("options", default.__dict__ )
import racy.renv.configs.allowedvalues as allowedvalues
register_dict("allowed_values", allowedvalues.__dict__ )
import racy.renv.configs.commandline as commandline
register_list("commandline_opts",
commandline.ALLOWED_COMMANDLINE_OPTIONS)
register_list("commandline_prj_opts",
commandline.ALLOWED_COMMANDLINE_PRJ_OPTIONS)
register_dict("descriptions_opts",
commandline.COMMANDLINE_OPTIONS_DESC)
d = dir
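#----------------------------------------
# Hedged illustration (not part of SConspiracy): what a concrete plugin built
# on the base class above could look like. The class name and the
# 'EXAMPLEOPT' option are hypothetical; a real plugin lives in its own module
# exposing a 'Plugin' attribute so PluginRegistry.load_plugin can pick it up.
class ExamplePlugin(Plugin):
    name = "example"
    options = {'EXAMPLEOPT': 'no'}
    allowed_values = {'EXAMPLEOPT': ['yes', 'no']}
    descriptions_opts = {'EXAMPLEOPT': 'purely illustrative option'}
    additive = True

    def has_additive(self, prj):
        # A real plugin would inspect 'prj' here.
        return False

    def get_additive(self, prj):
        return []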
class PluginRegistry(object):
plugins = {}
additive = []
replacement = []
env_addon = []
def load_dir(self, dir):
"""Find dirs (non-recursive) and load each plugin found in the dir"""
import imp
if not os.path.isdir(dir):
raise racy.RacyPluginError, "{0} isn't a directory, can't load_dir".format(dir)
walker = os.walk(dir)
root, dirs, files = walker.next()
racy.rutils.remove_vcs_dirs(dirs)
for dir in dirs:
try:
fp, pathname, description = imp.find_module(dir, [root])
except ImportError, e:
continue  # nothing importable in this directory; move on to the next one
try:
plugin = imp.load_module(dir, fp, pathname, description)
self.load_plugin(plugin)
except Exception, e:
tb = str(e) + os.linesep + ''.join(racy.get_last_exception_traceback())
msg = "Unable to load '{plug}' plugin : {err}".format(
plug = dir, err=tb )
racy.print_warning('Plugin load', msg, wrap=False)
finally:
if fp:
fp.close()
def load_plugin(self, plugin):
import types
from racy.renv.options import get_option
if isinstance(plugin, types.ModuleType):
if not hasattr(plugin, "Plugin"):
msg = "<{0.__file__}> doesn't define a Racy plugin"
raise racy.RacyPluginError, msg.format(plugin)
plug = plugin.Plugin()
plugin.Plugin = plug
name = plug.name.lower()
allowed_plugins = get_option('PLUGINS')
if allowed_plugins is not None and name not in allowed_plugins:
return
if not self.plugins.has_key(name):
self.plugins[name] = plugin
plug._load()
racy.rlog.info.log('Loaded Plugin', plug.name)
else:
oldpl = self.plugins[name]
msg = ("<{name}> plugin already registered."
" Defined here : {old.__file__}."
" Redefined here : {plg.__file__}."
)
raise racy.RacyPluginError, msg.format(
plg = plugin,
old = oldpl,
name = name
)
plug.init()
if plug.additive:
self.additive.append(plug)
if plug.replacement:
self.replacement.append(plug)
if plug.env_addon:
self.env_addon.append(plug)
else:
msg = ("<{0}> is not a python module, "
"can't load as SConspiracy plugin.")
raise racy.RacyPluginError, msg.format(plugin)
def obj_eligible_plugins(self, obj, entry):
def check(plugin,o):
method = 'has_' + entry
return getattr(plugin.Plugin,method)(o)
plugins = [p for p in self.plugins.values() if check(p,obj)]
return plugins
def get_plugins_result(self, obj, entry):
def get_prj(plugin,o):
method = 'get_' + entry
return getattr(plugin.Plugin,method)(o)
res = []
for p in self.obj_eligible_plugins(obj, entry):
res += get_prj(p,obj)
return res
def get_additive_projects(self, prj):
return [ r
for p in self.additive
for r in p._apply_additive(prj)
]
def get_replacement_projects(self, prj):
return [ r
for p in self.replacement
for r in p._apply_replacement(prj)
]
def get_env_addons(self, env):
return [ r
for p in self.env_addon
for r in p._apply_env_addon(env)
]
# def get_additive_projects(self, prj):
# return self.get_plugins_result(prj, "additive")
# def get_replacement_projects(self, prj):
# return self.get_plugins_result(prj, "replacement")
#
# def get_env_addons(self, env):
# return self.get_plugins_result(env, "env_addon")
register = PluginRegistry()
del PluginRegistry
|
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSAuthConnection
from boto.exception import DynamoDBResponseError
from boto.provider import Provider
from boto.dynamodb import exceptions as dynamodb_exceptions
from boto.dynamodb.table import Table
import time
try:
import simplejson as json
except ImportError:
import json
#
# To get full debug output, uncomment the following line and set the
# value of Debug to be 2
#
#boto.set_stream_logger('dynamodb')
Debug=0
class Layer1(AWSAuthConnection):
"""
This is the lowest-level interface to DynamoDB. Methods at this
layer map directly to API requests and parameters to the methods
are either simple, scalar values or they are the Python equivalent
of the JSON input as defined in the DynamoDB Developer's Guide.
All responses are direct decoding of the JSON response bodies to
Python data structures via the json or simplejson modules.
:ivar throughput_exceeded_events: An integer variable that
keeps a running total of the number of ThroughputExceeded
responses this connection has received from Amazon DynamoDB.
"""
DefaultHost = 'dynamodb.us-east-1.amazonaws.com'
"""The default DynamoDB API endpoint to connect to."""
ServiceName = 'DynamoDB'
"""The name of the Service"""
Version = '20111205'
"""DynamoDB API version."""
ThruputError = "ProvisionedThroughputExceededException"
"""The error response returned when provisioned throughput is exceeded"""
SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException'
"""The error response returned when session token has expired"""
ResponseError = DynamoDBResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
host=None, debug=0, session_token=None):
if not host:
host = self.DefaultHost
self._passed_access_key = aws_access_key_id
self._passed_secret_key = aws_secret_access_key
if not session_token:
session_token = self._get_session_token()
self.creds = session_token
self.throughput_exceeded_events = 0
AWSAuthConnection.__init__(self, host,
self.creds.access_key,
self.creds.secret_key,
is_secure, port, proxy, proxy_port,
debug=debug,
security_token=self.creds.session_token)
def _update_provider(self):
self.provider = Provider('aws',
self.creds.access_key,
self.creds.secret_key,
self.creds.session_token)
self._auth_handler.update_provider(self.provider)
def _get_session_token(self):
boto.log.debug('Creating new Session Token')
sts = boto.connect_sts(self._passed_access_key,
self._passed_secret_key)
return sts.get_session_token()
def _required_auth_capability(self):
return ['hmac-v3-http']
def make_request(self, action, body='', object_hook=None):
"""
:raises: ``DynamoDBExpiredTokenError`` if the security token expires.
"""
headers = {'X-Amz-Target' : '%s_%s.%s' % (self.ServiceName,
self.Version, action),
'Content-Type' : 'application/x-amz-json-1.0',
'Content-Length' : str(len(body))}
http_request = self.build_base_http_request('POST', '/', '/',
{}, headers, body, None)
response = self._mexe(http_request, sender=None,
override_num_retries=10,
retry_handler=self._retry_handler)
response_body = response.read()
boto.log.debug(response_body)
return json.loads(response_body, object_hook=object_hook)
def _retry_handler(self, response, i, next_sleep):
status = None
if response.status == 400:
response_body = response.read()
boto.log.debug(response_body)
json_response = json.loads(response_body)
if self.ThruputError in json_response.get('__type'):
self.throughput_exceeded_events += 1
msg = "%s, retry attempt %s" % (self.ThruputError, i)
if i == 0:
next_sleep = 0
else:
next_sleep = 0.05 * (2**i)
i += 1
status = (msg, i, next_sleep)
elif self.SessionExpiredError in json_response.get('__type'):
msg = 'Renewing Session Token'
self.creds = self._get_session_token()
self._update_provider()
status = (msg, i+self.num_retries-1, next_sleep)
else:
raise self.ResponseError(response.status, response.reason,
json_response)
return status
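# Hedged note (not in the original source): given the backoff above, the
# sleep before retry i is 0.05 * 2**i seconds for i >= 1 (0.1s, 0.2s,
# 0.4s, ...), while the very first ProvisionedThroughputExceeded retry
# (i == 0) is re-issued immediately.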
def list_tables(self, limit=None, start_table=None):
"""
Return a list of table names associated with the current account
and endpoint.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
"""
data = {}
if limit:
data['Limit'] = limit
if start_table:
data['ExclusiveStartTableName'] = start_table
json_input = json.dumps(data)
return self.make_request('ListTables', json_input)
def describe_table(self, table_name):
"""
Returns information about the table including current
state of the table, primary key schema and when the
table was created.
:type table_name: str
:param table_name: The name of the table to describe.
"""
data = {'TableName' : table_name}
json_input = json.dumps(data)
return self.make_request('DescribeTable', json_input)
def create_table(self, table_name, schema, provisioned_throughput):
"""
Add a new table to your account. The table name must be unique
among those associated with the account issuing the request.
This request triggers an asynchronous workflow to begin creating
the table. When the workflow is complete, the state of the
table will be ACTIVE.
:type table_name: str
:param table_name: The name of the table to create.
:type schema: dict
:param schema: A Python version of the KeySchema data structure
as defined by DynamoDB
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName' : table_name,
'KeySchema' : schema,
'ProvisionedThroughput': provisioned_throughput}
json_input = json.dumps(data)
response_dict = self.make_request('CreateTable', json_input)
return response_dict
def update_table(self, table_name, provisioned_throughput):
"""
Updates the provisioned throughput for a given table.
:type table_name: str
:param table_name: The name of the table to update.
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName': table_name,
'ProvisionedThroughput': provisioned_throughput}
json_input = json.dumps(data)
return self.make_request('UpdateTable', json_input)
def delete_table(self, table_name):
"""
Deletes the table and all of its data. After this request
the table will be in the DELETING state until DynamoDB
completes the delete operation.
:type table_name: str
:param table_name: The name of the table to delete.
"""
data = {'TableName': table_name}
json_input = json.dumps(data)
return self.make_request('DeleteTable', json_input)
def get_item(self, table_name, key, attributes_to_get=None,
consistent_read=False, object_hook=None):
"""
Return a set of attributes for an item that matches
the supplied key.
:type table_name: str
:param table_name: The name of the table containing the item.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
"""
data = {'TableName': table_name,
'Key': key}
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if consistent_read:
data['ConsistentRead'] = True
json_input = json.dumps(data)
response = self.make_request('GetItem', json_input,
object_hook=object_hook)
if not response.has_key('Item'):
raise dynamodb_exceptions.DynamoDBKeyNotFoundError(
"Key does not exist."
)
return response
def batch_get_item(self, request_items, object_hook=None):
"""
Return a set of attributes for multiple items in
multiple tables using their primary keys.
:type request_items: dict
:param request_items: A Python version of the RequestItems
data structure defined by DynamoDB.
"""
data = {'RequestItems' : request_items}
json_input = json.dumps(data)
return self.make_request('BatchGetItem', json_input,
object_hook=object_hook)
def put_item(self, table_name, item,
expected=None, return_values=None,
object_hook=None):
"""
Create a new item or replace an old item with a new
item (including all attributes). If an item already
exists in the specified table with the same primary
key, the new item will completely replace the old item.
You can perform a conditional put by specifying an
expected rule.
:type table_name: str
:param table_name: The name of the table in which to put the item.
:type item: dict
:param item: A Python version of the Item data structure
defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName' : table_name,
'Item' : item}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('PutItem', json_input,
object_hook=object_hook)
def update_item(self, table_name, key, attribute_updates,
expected=None, return_values=None,
object_hook=None):
"""
Edits an existing item's attributes. You can perform a conditional
update (insert a new attribute name-value pair if it doesn't exist,
or replace an existing name-value pair if it has certain expected
attribute values).
:type table_name: str
:param table_name: The name of the table.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB which identifies the item to be updated.
:type attribute_updates: dict
:param attribute_updates: A Python version of the AttributeUpdates
data structure defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName' : table_name,
'Key' : key,
'AttributeUpdates': attribute_updates}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('UpdateItem', json_input,
object_hook=object_hook)
def delete_item(self, table_name, key,
expected=None, return_values=None,
object_hook=None):
"""
Delete an item and all of its attributes by primary key.
You can perform a conditional delete by specifying an
expected rule.
:type table_name: str
:param table_name: The name of the table containing the item.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName' : table_name,
'Key' : key}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('DeleteItem', json_input,
object_hook=object_hook)
def query(self, table_name, hash_key_value, range_key_conditions=None,
attributes_to_get=None, limit=None, consistent_read=False,
scan_index_forward=True, exclusive_start_key=None,
object_hook=None):
"""
Perform a query of DynamoDB. This version is currently punting
and expecting you to provide a full and correct JSON body
which is passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to query.
:type hash_key_value: dict
:param hash_key_value: A DynamoDB-style HashKeyValue.
:type range_key_conditions: dict
:param range_key_conditions: A Python version of the
RangeKeyConditions data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specifies forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name,
'HashKeyValue': hash_key_value}
if range_key_conditions:
data['RangeKeyCondition'] = range_key_conditions
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if consistent_read:
data['ConsistentRead'] = True
if scan_index_forward:
data['ScanIndexForward'] = True
else:
data['ScanIndexForward'] = False
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Query', json_input,
object_hook=object_hook)
def scan(self, table_name, scan_filter=None,
attributes_to_get=None, limit=None,
count=False, exclusive_start_key=None,
object_hook=None):
"""
Perform a scan of DynamoDB. This version is currently punting
and expecting you to provide a full and correct JSON body
which is passed as is to DynamoDB.
:type table_name: str
:param table_name: The name of the table to scan.
:type scan_filter: dict
:param scan_filter: A Python version of the
ScanFilter data structure.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type limit: int
:param limit: The maximum number of items to return.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
"""
data = {'TableName': table_name}
if scan_filter:
data['ScanFilter'] = scan_filter
if attributes_to_get:
data['AttributesToGet'] = attributes_to_get
if limit:
data['Limit'] = limit
if count:
data['Count'] = True
if exclusive_start_key:
data['ExclusiveStartKey'] = exclusive_start_key
json_input = json.dumps(data)
return self.make_request('Scan', json_input, object_hook=object_hook)
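# Hedged usage sketch (not part of boto): how the Layer1 calls above chain
# together for the 2011-12-05 API. The table name, key schema and throughput
# values are illustrative, and Layer1() needs valid AWS credentials (via the
# environment or boto config) before this would actually run.
def _example_layer1_roundtrip():
    conn = Layer1()
    conn.create_table('example-table',
                      {'HashKeyElement': {'AttributeName': 'id',
                                          'AttributeType': 'S'}},
                      {'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5})
    conn.put_item('example-table',
                  {'id': {'S': 'item-1'}, 'payload': {'S': 'hello'}})
    return conn.get_item('example-table', {'HashKeyElement': {'S': 'item-1'}})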
|
|
import ftplib
import requests
import tempfile
import json
import os
import sys
'''
Created on 07 Sep 2013
@author: rob dobson
'''
class HostedConfigFile():
hostedDataLocations = []
latestFileVersion = -1
def initFromFile(self, fName):
with open(fName, 'r') as jsonFile:
localSetup = json.load(jsonFile)
if localSetup != None and 'ConfigLocations' in localSetup:
for locn in localSetup['ConfigLocations']:
self.hostedDataLocations.append(locn)
def addDataLocation(self, hostURLForGet, filePathForGet, getUsing, hostURLForPut, filePathForPut, putUsing, userName, passWord):
newLocation = { 'hostURLForGet': hostURLForGet, 'filePathForGet': filePathForGet, 'getUsing': getUsing,
'hostURLForPut': hostURLForPut, 'filePathForPut': filePathForPut, 'putUsing': putUsing,
'userName': userName, 'passWord':passWord }
self.hostedDataLocations.append(newLocation)
def getFileFromLocation(self, destFilePath):
# Get config file contents
configContents = self.getConfigContentsFromLocation()
if configContents == None:
return False
# Copy best file to output
with open(destFilePath, "wt") as outFile:
outFile.write(configContents)
return True
def getConfigDataFromLocation(self):
configContents = self.getConfigContentsFromLocation()
if configContents == None:
return None
configData = json.loads(configContents)
return configData
def getConfigContentsFromLocation(self):
# Work through the locations in order trying to get the file
tmpFiles = []
fileVersions = []
fileIdx = 0
for locn in self.hostedDataLocations:
# Get temp file
bFileLoadOk = False
fileVersion = { "ver":-1, "source":"" }
temporaryFile = tempfile.TemporaryFile('w+t')  # auto-deleted on close; TemporaryFile takes no delete kwarg on POSIX
if locn['getUsing'] == 'ftp':
bFileLoadOk = self.getFileWithFTP(locn, temporaryFile)
elif locn['getUsing'] == 'http':
bFileLoadOk = self.getFileWithHTTP(locn, temporaryFile)
elif locn['getUsing'] == 'local':
bFileLoadOk = self.getLocalFile(locn, temporaryFile)
if bFileLoadOk:
tmpFiles.append(temporaryFile)
temporaryFile.seek(0)
# Read the version info from the file
jsonData = json.load(temporaryFile)
if 'FileVersion' in jsonData:
fileVersion = {"ver":int(jsonData['FileVersion']), "source":(locn["sourceName"] if "sourceName" in locn else "")}
else:
tmpFiles.append(None)
temporaryFile.close()
fileVersions.append(fileVersion)
fileIdx += 1
# Find latest file version
latestVersion = -1
latestFileIdx = -1
for fileIdx in range(len(fileVersions)):
if latestVersion < fileVersions[fileIdx]["ver"]:
latestVersion = fileVersions[fileIdx]["ver"]
latestFileIdx = fileIdx
# Check if we failed to get a valid file from anywhere
if latestFileIdx == -1:
print("No valid file available")
for tFile in tmpFiles:
if tFile != None:
tFile.close()
return None
self.latestFileVersion = latestVersion
# Write back to versions that are not latest
for fileIdx in range(len(fileVersions)):
if fileVersions[fileIdx]["ver"] != -1:
print("FileIdx", fileIdx, "Version", fileVersions[fileIdx]["ver"], "Source",fileVersions[fileIdx]["source"])
if latestVersion != fileVersions[fileIdx]["ver"]:
self.copyFileToLocation(self.hostedDataLocations[fileIdx], tmpFiles[latestFileIdx])
# Get contents of latest file
print("LatestFileIdx", latestFileIdx, "Version", latestVersion)
tmpFiles[latestFileIdx].seek(0, os.SEEK_SET)
returnData = tmpFiles[latestFileIdx].read()
# Close all temporary files (which should delete them)
for tFile in tmpFiles:
if tFile != None:
tFile.close()
print("Success")
return returnData
def putConfigContentsToLocation(self, jsonStr):
tempFile = tempfile.TemporaryFile('w+t')  # auto-deleted on close
tempFile.write(jsonStr)
tempFile.seek(0)
for fileIdx in range(len(self.hostedDataLocations)):
reslt = self.copyFileToLocation(self.hostedDataLocations[fileIdx], tempFile)
print("PutToLocationIdx", fileIdx, "Result", reslt)
def getFileWithFTP(self, locn, outFile):
try:
ftp = ftplib.FTP(locn['hostURLForGet'])
ftp.login(locn['userName'], locn['passWord'])
# ftp.dir()
for filename in ftp.nlst(locn['filePathForGet']):
print('Getting ' + filename)
ftp.retrlines('RETR ' + filename, outFile.write)
break
ftp.close()
print ("Got file via FTP ", locn['hostURLForGet'])
return True
except Exception as excp:
print ("Failed to get from FTP", locn['hostURLForGet'], "excp", excp)
return False
def getFileWithHTTP(self, locn, outFile):
reqFile = None
try:
reqFile = requests.get(locn['hostURLForGet'] + locn['filePathForGet'], auth=(locn['userName'], locn['passWord']), timeout=30)
except requests.exceptions.ConnectionError:
print ("HTTP ConnectionError")
except requests.exceptions.HTTPError:
print ("HTTPError")
except requests.exceptions.URLRequired:
print ("HTTP URLRequired")
except requests.exceptions.TooManyRedirects:
print ("HTTP TooManyRedirects")
except requests.exceptions.Timeout:
print ("HTTP Timeout")
except requests.exceptions.RequestException:
print ("HTTP requests error")
        if reqFile is not None and reqFile.status_code == 200:
print (reqFile.status_code)
# Strip spurious newlines
newText = "\r".join([s for s in reqFile.text.splitlines() if s.strip("\r\n")])
print(newText)
outFile.write(newText)
return True
return False
def getLocalFile(self, locn, outFile):
print ("Trying to get local file ", locn['hostURLForGet'] + locn['filePathForGet'], "...", end="")
try:
with open(locn['hostURLForGet'] + locn['filePathForGet'], "r") as inFile:
print ("Got ok")
return self.copyFileContents(inFile, outFile)
except IOError as excp:
print ("getLocalFile I/O error({0}): {1}".format(excp.errno, excp.strerror))
        except ValueError as excp:
            print ("getLocalFile ValueError:", excp)
except:
print ("Unexpected error:", sys.exc_info()[0])
raise
return False
def copyFileContents(self, inFile, outFile):
try:
inFile.seek(0)
            while True:
linStr = inFile.readline()
if linStr == '':
break
outFile.write(linStr)
except IOError as excp:
print ("copyFileContents I/O error({0}): {1}".format(excp.errno, excp.strerror))
except:
return False
return True
def putFileWithFTP(self,locn,inFile):
inFile.seek(0)
        tempFile = tempfile.TemporaryFile('w+b')  # TemporaryFile has no 'delete' argument; it is removed on close
        while True:
linStr = inFile.readline()
if linStr == '':
break
tempFile.write(linStr.encode('ascii'))
tempFile.seek(0)
try:
with ftplib.FTP(locn['hostURLForPut']) as ftp:
ftp.login(locn['userName'], locn['passWord'])
fileNameParts = os.path.split(locn['filePathForPut'])
ftp.cwd(fileNameParts[0])
ftp.storbinary("STOR " + fileNameParts[1], tempFile)
except ftplib.all_errors as excp:
print("FTP error", str(excp))
tempFile.close()
return False
tempFile.close()
return True
def copyFileToLocation(self, locn, fileToCopyFrom):
success = False
try:
if locn['putUsing'] == 'ftp':
print ("Attempting to copy file using ftp to ", locn['hostURLForPut'], locn['filePathForPut'])
success = self.putFileWithFTP(locn, fileToCopyFrom)
elif locn['putUsing'] == 'local':
print ("Attempting to copy file local to ", locn['hostURLForPut'], locn['filePathForPut'])
with open(locn['hostURLForPut'] + locn['filePathForPut'], "wt") as outFile:
success = self.copyFileContents(fileToCopyFrom, outFile)
        except Exception as excp:
            print("Failed to copy file:", excp)
return success
def configFileUpdate(self, updatedData):
# form data to write
updatedData["FileVersion"] = self.latestFileVersion + 1
jsonStr = json.dumps(updatedData, indent=4)
self.putConfigContentsToLocation(jsonStr)
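    # Illustrative usage sketch (not part of the original class; the constructor and
    # location-registration call shown here are hypothetical, only the method names
    # defined above are real):
    #
    #   hostedData = HostedDataLocations()                      # hypothetical class name
    #   hostedData.addLocation(...)                             # hypothetical registration call
    #   configData = hostedData.getConfigDataFromLocation()     # fetch newest config across locations
    #   if configData is not None:
    #       configData["someSetting"] = "newValue"              # hypothetical key
    #       hostedData.configFileUpdate(configData)             # bumps FileVersion and pushes to every location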
|
|
import logging
import sys
import types
from functools import wraps
from functools import partial
from .error import SleuthError, SleuthNotFoundError
from ._util import import_, set_trace
__all__ = ['breakOnEnter', 'breakOnException', 'breakOnExit', 'breakOnResult',
'callOnEnter', 'callOnException', 'callOnExit', 'callOnResult',
'logCalls', 'logOnException', 'skip', 'substitute', 'tap']
def logCalls(func=None, *, enterFmtStr=None, exitFmtStr=None,
level=logging.DEBUG, logName=None, timerFunc=None):
"""
A function wrapper that logs call information about a function.
Logging is performed both when the wrapped function is entered and
exited. By default, the call number, name, and total call time of
the function are logged.
Parameters
----------
func : The function to wrap.
enterFmtStr : A formatted string that is output when the wrapped
function is entered. The format() function is called on the
string with locals(). If not specified, this argument is set to
'[{callNumber}] Calling {funcName}()'.
exitFmtStr : A formatted string that is output when the wrapped
function is exited. The format() function is called on the
string with locals(). If not specified, this argument is set to
'[{callNumber}] Exiting {funcName}()\t[{callTime} seconds]'.
level : The logging level used for logging calls. This must be one
of the logging level constants defined in the logging module,
e.g. logging.DEBUG.
logName : The name of the log which is written to by logging calls.
If not given, the name of the module in which the wrapped
function is defined is used, i.e. func.__module__.
timerFunc : The function used for timing the duration of function
calls. This function is called before and after the wrapped
function is called. The difference between the two return
values of the timing function is used as the duration of the
function call. If not given, time.time is used.
"""
if func is None:
return partial(logCalls, enterFmtStr=enterFmtStr,
exitFmtStr=exitFmtStr, level=level, logName=logName,
timerFunc=timerFunc)
# The number of times the wrapped function has been called
nCalls = 0
if enterFmtStr is None:
enterFmtStr = '[{callNumber}] Calling {funcName}()'
if exitFmtStr is None:
exitFmtStr = ('[{callNumber}] Exiting {funcName}()\t[{callTime} '
'seconds]')
if logName is None:
logName = func.__module__
if timerFunc is None:
import time
timerFunc = time.time
@wraps(func)
def wrapper(*args, **kwargs):
nonlocal nCalls
funcName = func.__name__
callNumber = nCalls
logger = logging.getLogger(logName)
logMsg = enterFmtStr.format(**locals())
logger.log(level, logMsg)
nCalls = nCalls + 1
start = timerFunc()
result = func(*args, **kwargs)
end = timerFunc()
callTime = round(end - start, 4) # TODO: use string formatting instead
logMsg = exitFmtStr.format(**locals())
logger.log(level, logMsg)
return result
return wrapper
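# Illustrative usage sketch (not part of the original module): shows logCalls
# applied as a decorator. The helper below is hypothetical and is never called
# at import time.
def _demo_logCalls_usage():
    logging.basicConfig(level=logging.DEBUG)

    @logCalls(level=logging.DEBUG)
    def add(a, b):
        return a + b

    # Logs '[0] Calling add()' on entry and '[0] Exiting add() ...' on exit.
    return add(1, 2)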
def logOnException(func=None, *, exceptionList=Exception, suppress=False,
fmtStr=None, level=logging.DEBUG, logName=None):
"""
A function wrapper that logs information when an exception is
thrown by the wrapped function.
Parameters
----------
func : The function to wrap.
exceptionList : An exception or tuple of exceptions to be logged.
suppress : A boolean indicating whether a caught exception should
be suppressed. If False, the exception is reraised. This only
applies to exceptions specified in exceptionList.
fmtStr : A formatted string that is output when the wrapped
function raises a specified exception.
level : The logging level used for logging calls. This must be one
of the logging level constants defined in the logging module,
e.g. logging.DEBUG.
logName : The name of the log which is written to by logging calls.
If not given, the name of the module in which the wrapped
function is defined is used, i.e. func.__module__.
"""
if func is None:
return partial(logOnException, exceptionList=exceptionList,
suppress=suppress, fmtStr=fmtStr, level=level,
logName=logName)
if fmtStr is None:
fmtStr = ("Exception raised in {funcName}(): '{exceptionType}: "
"{exception}'")
if logName is None:
logName = func.__module__
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptionList as exception:
exceptionType = exception.__class__.__name__
funcName = func.__name__
logger = logging.getLogger(logName)
logMsg = fmtStr.format(**locals())
logger.log(level, logMsg)
if not suppress:
raise
return wrapper
def breakOnEnter(func=None, *, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function is called.
Parameters
----------
func : The function to wrap.
debugger : The debugger used when debug mode is entered. This can
be either the debugging module itself or a string containing
the name of the debugging module. Currently, pdb and ipdb are
supported.
"""
if func is None:
return partial(breakOnEnter, debugger=debugger)
debugger = import_(debugger)
@wraps(func)
def wrapper(*args, **kwargs):
return debugger.runcall(func, *args, **kwargs)
return wrapper
def breakOnExit(func=None, *, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function exits.
Parameters
----------
func : The function to wrap.
debugger : The debugger used when debug mode is entered. This can
be either the debugging module itself or a string containing
the name of the debugging module. Currently, pdb and ipdb are
supported.
"""
if func is None:
return partial(breakOnExit, debugger=debugger)
debugger = import_(debugger)
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
debug_frame = sys._getframe().f_back
set_trace(debug_frame, debugger)
return result
return wrapper
def breakOnResult(func=None, *, compare=None, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function returns a certain result.
Parameters
----------
func : The function to wrap.
compare : A function used to perform the comparison. When the
wrapped function returns, this function is called with the
result. Debug mode is entered if the compare function returns
True.
debugger : The debugger used when debug mode is entered. This can
be either the debugging module itself or a string containing
the name of the debugging module. Currently, pdb and ipdb are
supported.
"""
if func is None:
return partial(breakOnResult, compare=compare, debugger=debugger)
debugger = import_(debugger)
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if compare(result):
debug_frame = sys._getframe().f_back
set_trace(debug_frame, debugger)
return result
return wrapper
def breakOnException(func=None, *, exceptionList=Exception, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function throws a specified exception.
Parameters
----------
func : The function to wrap.
exceptionList : An exception or tuple of exceptions to break on.
debugger : The debugger used when debug mode is entered. This can
be either the debugging module itself or a string containing
the name of the debugging module. Currently, pdb and ipdb are
supported.
"""
if func is None:
return partial(breakOnException, exceptionList=exceptionList,
debugger=debugger)
debugger = import_(debugger)
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptionList as e:
debug_frame = sys._getframe().f_back
set_trace(debug_frame, debugger)
return wrapper
def callOnEnter(func=None, *, callback=None):
"""
A function wrapper that calls a callback function before the
wrapped function is called.
Parameters
----------
func : The function to wrap.
callback : The callback function to call. This function is called
with the wrapped function as the first argument, followed by
the same arguments passed to the wrapped function.
"""
if func is None:
return partial(callOnEnter, callback=callback)
@wraps(func)
def wrapper(*args, **kwargs):
callback(func, *args, **kwargs) # TODO: add attribute for retval?
return func(*args, **kwargs)
return wrapper
def callOnExit(func=None, *, callback=None):
"""
A function wrapper that calls a callback function after the wrapped
function is called.
Parameters
----------
func : The function to wrap.
callback : The callback function to call. This function is called
with the wrapped function and the value returned by the wrapped
function. The return value of the callback function is
ultimately returned to the caller of the wrapped function.
"""
if func is None:
return partial(callOnExit, callback=callback)
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
return callback(func, result)
return wrapper
def callOnResult(func=None, *, compare=None, callback=None):
"""
A function wrapper that calls a callback function when the wrapped
function returns a certain result.
Parameters
----------
func : The function to wrap.
compare : A function used to perform the comparison. When the
wrapped function returns, this function is called with the
result. The callback function is called if the compare function
returns True.
callback : The callback function to call. This function is called
with the wrapped function and the value returned by the wrapped
function if the compare function returns True. If called, the
return value of the callback function is ultimately returned to
the caller of the wrapped function.
"""
if func is None:
return partial(callOnResult, compare=compare, callback=callback)
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if compare(result):
result = callback(func, result)
return result
return wrapper
def callOnException(func=None, *, exceptionList=Exception, callback=None):
"""
A function wrapper that calls a callback function when the wrapped
function throws a specified exception.
Parameters
----------
func : The function to wrap.
exceptionList : A tuple of exceptions on which to call the callback
function.
callback : The callback function to call. This function is called
with the wrapped function and the exception thrown by the
wrapped function. After the callback function returns, the
exception is reraised if the return value of the callback
function was False. Otherwise, the exception is caught and
suppressed. By default, the exception is reraised if the
callback function returns no value.
"""
if func is None:
return partial(callOnException, exceptionList=exceptionList,
callback=callback)
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptionList as e:
if not callback(func, e):
raise
return wrapper
def skip(func=None, *, returnValue=None):
"""
A function wrapper that causes the call to the wrapped function to
be skipped.
Parameters
----------
func : The function to wrap.
returnValue : A value to return in place of the value that would
normally be returned by the wrapped function. This is None by
default.
"""
if func is None:
return partial(skip, returnValue=returnValue)
@wraps(func)
def wrapper(*args, **kwargs):
return returnValue
return wrapper
def substitute(func=None, *, replacement=None):
"""
A function wrapper that substitutes calls to the wrapped function
    with calls to a replacement function.
Parameters
----------
func : The function to wrap.
replacement : A function to be substituted for the wrapped
function. The replacement function is called with the same
arguments as would be passed to the wrapped function.
"""
if func is None:
return partial(substitute, replacement=replacement)
@wraps(func)
def wrapper(*args, **kwargs):
return replacement(*args, **kwargs)
return wrapper
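# Illustrative usage sketch (not part of the original module): skip() replaces a
# call with a fixed return value, while substitute() reroutes it to another
# callable. The helper below is hypothetical and is never called at import time.
def _demo_skip_and_substitute():
    @skip(returnValue=0)
    def expensive():
        raise RuntimeError('never runs')

    @substitute(replacement=lambda x: x * 2)
    def identity(x):
        return x

    return expensive(), identity(21)  # -> (0, 42)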
def tap(func, wrapper, *args, **kwargs):
"""
Apply a Sleuth function wrapper to a function or method.
Parameters
----------
func : The function to wrap.
wrapper : A Sleuth function wrapper to apply to func.
*args, **kwargs : Positional and keyword arguments that should be
passed to wrapper.
"""
try:
module = sys.modules[func.__module__]
except (KeyError, AttributeError):
raise SleuthNotFoundError("The module containing function '{0}' could "
"not be found.".format(func.__name__))
wrapped = wrapper(*args, **kwargs)(func)
parent = _get_parent_scope(func, module)
setattr(parent, func.__name__, wrapped)
# TODO: is func.__name__ always correct?
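# Illustrative usage sketch (not part of the original module): tap() patches the
# wrapped function back into its defining module so existing call sites pick up
# the wrapper. 'mymodule' and 'some_func' below are hypothetical names.
def _demo_tap_usage():
    import mymodule
    tap(mymodule.some_func, logCalls, level=logging.DEBUG)
    mymodule.some_func()  # subsequent calls go through the logCalls wrapper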
def _get_parent_scope(func, module):
"""
Obtain the parent scope of a function given the module in which it
is defined.
"""
path = _search(func, module, limit=100)
if path is not None:
return path[-2]
else:
raise SleuthNotFoundError("The function '{0}' could not be found "
"within module '{1}'."
.format(func.__name__, module.__name__))
def _search(func, module, limit):
"""
Get the path of a function starting with the module in which it is
defined; that is, the sequence of enclosing modules and classes
that must be followed to reach the function from its module.
Returns
-------
A list of module and class objects which forms a path from the
module in which a function is defined to the function itself.
The first item in the list is the module in which the function
is defined and the last item is the function itself. Each item
in the list is an attribute of the previous item.
"""
def search_helper(goal, node, path, depth, limit, seen):
# Cut off redundant searches
if node in seen:
return None
# Cut off deep searches
if limit is not None and depth > limit:
return None
# Keep track of searched nodes and search path
seen.add(node)
path.append(node)
if node is goal:
return path
for attr in dir(node):
try:
child = getattr(node, attr)
# Only search modules, classes, and functions
if (isinstance(child, type) or
isinstance(child, types.ModuleType) or
isinstance(child, types.FunctionType)):
child_path = search_helper(goal, child, path, depth + 1,
limit, seen)
if child_path is not None:
return child_path
except AttributeError:
# Ignore attribute errors
pass
# Solution path does not contain this node
path.pop()
return None
for i in range(1, limit):
path = search_helper(func, module, [], 0, i, set())
if path is not None:
return path
return None
|
|
import argparse
import itertools
import string
import struct
import sys
# Accepts list of tuples [(mergeable, value)] and merges fields where
# mergeable is True.
def merge(iterable, merge=lambda a,b:a+b):
for k, g in itertools.groupby(iterable, key=lambda a:a[0]):
if k is True:
yield reduce(merge, (i[1] for i in g))
else:
for i in g:
yield i[1]
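# Example (illustrative, not part of the original source): consecutive mergeable
# items are folded together while non-mergeable items pass through unchanged:
#   list(merge([(True, 'a'), (True, 'b'), (False, 'c'), (True, 'd')]))
#   -> ['ab', 'c', 'd']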
ACCEPTABLE_CHARS = set(string.printable) - set(string.whitespace) - set(string.punctuation)
def gen(args, l3_off=0, ipversion=4, negate=False):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
prog="%s dns --" % (sys.argv[0]),
description=r'''
This tool creates a raw Berkeley Packet Filter (BPF) rule that will
match packets which are DNS queries against listed domains. For
example:
%(prog)s example.com
will print a BPF rule matching all packets that look like a DNS packet
whose first query is equal to "example.com". Another example:
%(prog)s *.www.fint.me
will match packets that have any prefix (subdomain) and exactly
"www.fint.me" as the suffix. It will match:
blah.www.fint.me
anyanyany.www.fint.me
but it will not match:
www.fint.me
blah.blah.www.fint.me
Also, a star has a special meaning only if it is the sole content of a
label: in "*xxx.example.com" the star is treated literally, and so it is
in "xxx*.example.com". On the other hand, in "xxx.*.example.com" it has
its wildcard meaning.
A question mark '?' matches exactly one character. For example this rule:
%(prog)s fin?.me
will match:
fint.me, finT.me, finX.me, finZ.me
but will not match:
finXX.me, fiXX.me, www.finX.me, fin.me
You can create a single rule matching more than one domain:
%(prog)s example.com *.www.fint.me
Leading and trailing dots are ignored; these commands are equivalent:
%(prog)s example.com fint.me
%(prog)s .example.com fint.me.
Finally the "--ignorecase" option will produce BPF bytecode that
matches domains in case insensitive way. Beware, the genrated bytecode
will be significantly longer.
''')
parser.add_argument('-i', '--ignorecase', action='store_true',
help='match domains in case-insensitive way')
parser.add_argument('domains', nargs='*',
help='DNS domain patterns to match on')
args = parser.parse_args(args)
if not args.domains:
parser.print_help()
sys.exit(-1)
list_of_rules = []
for domain in args.domains:
# remove trailing and leading dots and whitespace
domain = domain.strip(".").strip()
# keep the trailing dot
domain += '.'
rule = []
for part in domain.split("."):
if part == '*':
rule.append( (False, '*') )
else:
rule.append( (True, [(False, chr(len(part)))] \
+ [(True, c) for c in part]) )
list_of_rules.append( list(merge(rule)) )
def match_exact(rule, label, last=False):
mask = []
for is_char, b in rule:
if is_char and b == '?':
mask.append( '\xff' )
elif is_char and args.ignorecase:
mask.append( '\x20' )
else:
mask.append( '\x00' )
mask = ''.join(mask)
s = ''.join(map(lambda (is_char, b): b, rule))
print " ; Match: %s %r mask=%s" % (s.encode('hex'), s, mask.encode('hex'))
off = 0
while s:
if len(s) >= 4:
m, s = s[:4], s[4:]
mm, mask = mask[:4], mask[4:]
m, = struct.unpack('!I', m)
mm, = struct.unpack('!I', mm)
print " ld [x + %i]" % off
if mm:
print " or #0x%08x" % mm
m |= mm
print " jneq #0x%08x, %s" % (m, label,)
off += 4
elif len(s) >= 2:
m, s = s[:2], s[2:]
mm, mask = mask[:2], mask[2:]
m, = struct.unpack('!H', m)
mm, = struct.unpack('!H', mm)
print " ldh [x + %i]" % off
if mm:
print " or #0x%04x" % mm
m |= mm
print " jneq #0x%04x, %s" % (m, label,)
off += 2
else:
m, s = s[:1], s[1:]
m, = struct.unpack('!B', m)
mm, mask = mask[:1], mask[1:]
mm, = struct.unpack('!B', mm)
print " ldb [x + %i]" % off
if mm:
print " or #0x%02x" % mm
m |= mm
print " jneq #0x%02x, %s" % (m, label,)
off += 1
if not last:
print " txa"
print " add #%i" % (off,)
print " tax"
def match_star():
print " ; Match: *"
print " ldb [x + 0]"
print " add x"
print " add #1"
print " tax"
if ipversion == 4:
print " ldx 4*([%i]&0xf)" % (l3_off,)
print " ; l3_off(%i) + 8 of udp + 12 of dns" % (l3_off,)
print " ld #%i" % (l3_off + 8 + 12) # 8B of udp + 12B of dns header
print " add x"
elif ipversion == 6:
# assuming first "next header" is UDP
print " ld #%i" % (l3_off + 40 + 8 + 12) # 40B of ipv6 + 8B of udp + 12B of dns header
print " tax"
print " ; a = x = M[0] = offset of first dns query byte"
print " %sst M[0]" % ('' if len(list_of_rules) > 1 else '; ',)
print
for i, rules in enumerate(list_of_rules):
print "lb_%i:" % (i,)
#print " ; %r" % (rules,)
print " %sldx M[0]" % ('' if i != 0 else '; ')
for j, rule in enumerate(rules):
last = (j == len(rules)-1)
if rule != '*':
match_exact(rule, 'lb_%i' % (i+1,), last)
else:
match_star()
print " ret #%i" % (1 if not negate else 0)
print
print "lb_%i:" % (i+1,)
print " ret #%i" % (0 if not negate else 1)
name_parts = []
for domain in args.domains:
if domain[0] == '-':
continue
domain = domain.strip(".").strip()
parts = []
for part in domain.split("."):
if part == '*':
parts.append( 'any' )
else:
parts.append( ''.join(c if c in ACCEPTABLE_CHARS else 'x'
for c in part) )
name_parts.append( '_'.join(parts) )
return '_'.join(name_parts)
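# Illustrative usage (not part of the original source): calling
#   gen(['example.com', '*.www.fint.me'])
# prints the generated BPF assembly on stdout and returns a suggested rule
# name such as 'example_com_any_www_fint_me'.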
|
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import uuid
import ldap as ldap
import ldap.filter
from keystone import assignment
from keystone.assignment.role_backends import ldap as ldap_role
from keystone import clean
from keystone.common import driver_hints
from keystone.common import ldap as common_ldap
from keystone.common import models
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.identity.backends import ldap as ldap_identity
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
class Assignment(assignment.Driver):
def __init__(self):
super(Assignment, self).__init__()
self.LDAP_URL = CONF.ldap.url
self.LDAP_USER = CONF.ldap.user
self.LDAP_PASSWORD = CONF.ldap.password
self.suffix = CONF.ldap.suffix
# These are the only deep dependency from assignment back
# to identity. The assumption is that if you are using
# LDAP for assignments, you are using it for Id as well.
self.user = ldap_identity.UserApi(CONF)
self.group = ldap_identity.GroupApi(CONF)
self.project = ProjectApi(CONF)
self.role = RoleApi(CONF, self.user)
def default_role_driver(self):
return 'keystone.assignment.role_backends.ldap.Role'
def _set_default_parent_project(self, ref):
"""If the parent project ID has not been set, set it to None."""
if isinstance(ref, dict):
if 'parent_id' not in ref:
ref = dict(ref, parent_id=None)
return ref
elif isinstance(ref, list):
return [self._set_default_parent_project(x) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
def _validate_parent_project_is_none(self, ref):
"""If a parent_id different from None was given,
raises InvalidProjectException.
"""
parent_id = ref.get('parent_id')
if parent_id is not None:
raise exception.InvalidParentProject(parent_id)
def _set_default_attributes(self, project_ref):
project_ref = self._set_default_domain(project_ref)
return self._set_default_parent_project(project_ref)
def get_project(self, tenant_id):
return self._set_default_attributes(
self.project.get(tenant_id))
def list_projects(self, hints):
return self._set_default_attributes(
self.project.get_all())
def list_projects_in_domain(self, domain_id):
# We don't support multiple domains within this driver, so ignore
# any domain specified
return self.list_projects(driver_hints.Hints())
def list_projects_in_subtree(self, project_id):
# We don't support projects hierarchy within this driver, so a
# project will never have children
return []
def list_project_parents(self, project_id):
# We don't support projects hierarchy within this driver, so a
# project will never have parents
return []
def is_leaf_project(self, project_id):
# We don't support projects hierarchy within this driver, so a
# project will always be a root and a leaf at the same time
return True
def list_projects_from_ids(self, ids):
return [self.get_project(id) for id in ids]
def list_project_ids_from_domain_ids(self, domain_ids):
# We don't support multiple domains within this driver, so ignore
# any domain specified.
return [x.id for x in self.list_projects(driver_hints.Hints())]
def get_project_by_name(self, tenant_name, domain_id):
self._validate_default_domain_id(domain_id)
return self._set_default_attributes(
self.project.get_by_name(tenant_name))
def create_project(self, tenant_id, tenant):
self.project.check_allow_create()
tenant = self._validate_default_domain(tenant)
self._validate_parent_project_is_none(tenant)
tenant['name'] = clean.project_name(tenant['name'])
data = tenant.copy()
if 'id' not in data or data['id'] is None:
data['id'] = str(uuid.uuid4().hex)
if 'description' in data and data['description'] in ['', None]:
data.pop('description')
return self._set_default_attributes(
self.project.create(data))
def update_project(self, tenant_id, tenant):
self.project.check_allow_update()
tenant = self._validate_default_domain(tenant)
if 'name' in tenant:
tenant['name'] = clean.project_name(tenant['name'])
return self._set_default_attributes(
self.project.update(tenant_id, tenant))
def list_role_ids_for_groups_on_project(
self, groups, project_id, project_domain_id, project_parents):
group_dns = [self.group._id_to_dn(group_id) for group_id in groups]
role_list = [self.role._dn_to_id(role_assignment.role_dn)
for role_assignment in self.role.get_role_assignments
(self.project._id_to_dn(project_id))
if role_assignment.user_dn.upper() in group_dns]
# NOTE(morganfainberg): Does not support OS-INHERIT as domain
# metadata/roles are not supported by LDAP backend. Skip OS-INHERIT
# logic.
return role_list
def _get_metadata(self, user_id=None, tenant_id=None,
domain_id=None, group_id=None):
def _get_roles_for_just_user_and_project(user_id, tenant_id):
user_dn = self.user._id_to_dn(user_id)
return [self.role._dn_to_id(a.role_dn)
for a in self.role.get_role_assignments
(self.project._id_to_dn(tenant_id))
if common_ldap.is_dn_equal(a.user_dn, user_dn)]
def _get_roles_for_group_and_project(group_id, project_id):
group_dn = self.group._id_to_dn(group_id)
return [self.role._dn_to_id(a.role_dn)
for a in self.role.get_role_assignments
(self.project._id_to_dn(project_id))
if common_ldap.is_dn_equal(a.user_dn, group_dn)]
if domain_id is not None:
msg = _('Domain metadata not supported by LDAP')
raise exception.NotImplemented(message=msg)
if group_id is None and user_id is None:
return {}
if tenant_id is None:
return {}
if user_id is None:
metadata_ref = _get_roles_for_group_and_project(group_id,
tenant_id)
else:
metadata_ref = _get_roles_for_just_user_and_project(user_id,
tenant_id)
if not metadata_ref:
return {}
return {'roles': [self._role_to_dict(r, False) for r in metadata_ref]}
def list_project_ids_for_user(self, user_id, group_ids, hints,
inherited=False):
# TODO(henry-nash): The ldap driver does not support inherited
# assignments, so the inherited parameter is unused.
# See bug #1404273.
user_dn = self.user._id_to_dn(user_id)
associations = (self.role.list_project_roles_for_user
(user_dn, self.project.tree_dn))
for group_id in group_ids:
group_dn = self.group._id_to_dn(group_id)
for group_role in self.role.list_project_roles_for_group(
group_dn, self.project.tree_dn):
associations.append(group_role)
return list(set(
[self.project._dn_to_id(x.project_dn) for x in associations]))
def list_role_ids_for_groups_on_domain(self, group_ids, domain_id):
raise exception.NotImplemented()
def list_project_ids_for_groups(self, group_ids, hints,
inherited=False):
raise exception.NotImplemented()
def list_domain_ids_for_user(self, user_id, group_ids, hints):
raise exception.NotImplemented()
def list_domain_ids_for_groups(self, group_ids, inherited=False):
raise exception.NotImplemented()
def list_user_ids_for_project(self, tenant_id):
tenant_dn = self.project._id_to_dn(tenant_id)
rolegrants = self.role.get_role_assignments(tenant_dn)
return [self.user._dn_to_id(user_dn) for user_dn in
self.project.get_user_dns(tenant_id, rolegrants)]
def _subrole_id_to_dn(self, role_id, tenant_id):
if tenant_id is None:
return self.role._id_to_dn(role_id)
else:
return '%s=%s,%s' % (self.role.id_attr,
ldap.dn.escape_dn_chars(role_id),
self.project._id_to_dn(tenant_id))
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
user_dn = self.user._id_to_dn(user_id)
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
self.role.add_user(role_id, role_dn, user_dn, user_id, tenant_id)
tenant_dn = self.project._id_to_dn(tenant_id)
return UserRoleAssociation(role_dn=role_dn,
user_dn=user_dn,
tenant_dn=tenant_dn)
def _add_role_to_group_and_project(self, group_id, tenant_id, role_id):
group_dn = self.group._id_to_dn(group_id)
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
self.role.add_user(role_id, role_dn, group_dn, group_id, tenant_id)
tenant_dn = self.project._id_to_dn(tenant_id)
return GroupRoleAssociation(group_dn=group_dn,
role_dn=role_dn,
tenant_dn=tenant_dn)
def delete_project(self, tenant_id):
self.project.check_allow_delete()
if self.project.subtree_delete_enabled:
self.project.deleteTree(tenant_id)
else:
# The manager layer will call assignments to delete the
# role assignments, so we just have to delete the project itself.
self.project.delete(tenant_id)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
return self.role.delete_user(role_dn,
self.user._id_to_dn(user_id), role_id)
def _remove_role_from_group_and_project(self, group_id, tenant_id,
role_id):
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
return self.role.delete_user(role_dn,
self.group._id_to_dn(group_id), role_id)
def create_domain(self, domain_id, domain):
if domain_id == CONF.identity.default_domain_id:
msg = _('Duplicate ID, %s.') % domain_id
raise exception.Conflict(type='domain', details=msg)
raise exception.Forbidden(_('Domains are read-only against LDAP'))
def get_domain(self, domain_id):
self._validate_default_domain_id(domain_id)
return assignment.calc_default_domain()
def update_domain(self, domain_id, domain):
self._validate_default_domain_id(domain_id)
raise exception.Forbidden(_('Domains are read-only against LDAP'))
def delete_domain(self, domain_id):
self._validate_default_domain_id(domain_id)
raise exception.Forbidden(_('Domains are read-only against LDAP'))
def list_domains(self, hints):
return [assignment.calc_default_domain()]
def list_domains_from_ids(self, ids):
return [assignment.calc_default_domain()]
# Bulk actions on User From identity
def delete_user(self, user_id):
user_dn = self.user._id_to_dn(user_id)
for ref in self.role.list_global_roles_for_user(user_dn):
self.role.delete_user(ref.role_dn, ref.user_dn,
self.role._dn_to_id(ref.role_dn))
for ref in self.role.list_project_roles_for_user(user_dn,
self.project.tree_dn):
self.role.delete_user(ref.role_dn, ref.user_dn,
self.role._dn_to_id(ref.role_dn))
def delete_group(self, group_id):
"""Called when the group was deleted.
Any role assignments for the group should be cleaned up.
"""
group_dn = self.group._id_to_dn(group_id)
group_role_assignments = self.role.list_project_roles_for_group(
group_dn, self.project.tree_dn)
for ref in group_role_assignments:
self.role.delete_user(ref.role_dn, ref.group_dn,
self.role._dn_to_id(ref.role_dn))
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
try:
metadata_ref = self._get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
if user_id is None:
metadata_ref['roles'] = self._add_role_to_group_and_project(
group_id, project_id, role_id)
else:
metadata_ref['roles'] = self.add_role_to_user_and_project(
user_id, project_id, role_id)
def check_grant_role_id(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
try:
metadata_ref = self._get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
role_ids = set(self._roles_from_role_dicts(
metadata_ref.get('roles', []), inherited_to_projects))
if role_id not in role_ids:
raise exception.RoleNotFound(role_id=role_id)
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
try:
metadata_ref = self._get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
try:
if user_id is None:
metadata_ref['roles'] = (
self._remove_role_from_group_and_project(
group_id, project_id, role_id))
else:
metadata_ref['roles'] = self.remove_role_from_user_and_project(
user_id, project_id, role_id)
except KeyError:
raise exception.RoleNotFound(role_id=role_id)
def list_grant_role_ids(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
try:
metadata_ref = self._get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
return self._roles_from_role_dicts(metadata_ref.get('roles', []),
inherited_to_projects)
def get_domain_by_name(self, domain_name):
default_domain = assignment.calc_default_domain()
if domain_name != default_domain['name']:
raise exception.DomainNotFound(domain_id=domain_name)
return default_domain
def list_role_assignments(self):
role_assignments = []
for a in self.role.list_role_assignments(self.project.tree_dn):
if isinstance(a, UserRoleAssociation):
assignment = {
'role_id': self.role._dn_to_id(a.role_dn),
'user_id': self.user._dn_to_id(a.user_dn),
'project_id': self.project._dn_to_id(a.project_dn)}
else:
assignment = {
'role_id': self.role._dn_to_id(a.role_dn),
'group_id': self.group._dn_to_id(a.group_dn),
'project_id': self.project._dn_to_id(a.project_dn)}
role_assignments.append(assignment)
return role_assignments
def delete_project_assignments(self, project_id):
tenant_dn = self.project._id_to_dn(project_id)
self.role.roles_delete_subtree_by_project(tenant_dn)
def delete_role_assignments(self, role_id):
self.role.roles_delete_subtree_by_role(role_id, self.project.tree_dn)
# TODO(termie): turn this into a data object and move logic to driver
class ProjectApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
DEFAULT_OU = 'ou=Groups'
DEFAULT_STRUCTURAL_CLASSES = []
DEFAULT_OBJECTCLASS = 'groupOfNames'
DEFAULT_ID_ATTR = 'cn'
DEFAULT_MEMBER_ATTRIBUTE = 'member'
NotFound = exception.ProjectNotFound
notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
options_name = 'project'
attribute_options_names = {'name': 'name',
'description': 'desc',
'enabled': 'enabled',
'domain_id': 'domain_id'}
immutable_attrs = ['name']
model = models.Project
def __init__(self, conf):
super(ProjectApi, self).__init__(conf)
self.member_attribute = (getattr(conf.ldap, 'project_member_attribute')
or self.DEFAULT_MEMBER_ATTRIBUTE)
def create(self, values):
data = values.copy()
if data.get('id') is None:
data['id'] = uuid.uuid4().hex
return super(ProjectApi, self).create(data)
def get_user_projects(self, user_dn, associations):
"""Returns list of tenants a user has access to
"""
project_ids = set()
for assoc in associations:
project_ids.add(self._dn_to_id(assoc.project_dn))
projects = []
for project_id in project_ids:
# slower to get them one at a time, but a huge list could blow out
# the connection. This is the safer way
projects.append(self.get(project_id))
return projects
def get_user_dns(self, tenant_id, rolegrants, role_dn=None):
tenant = self._ldap_get(tenant_id)
res = set()
if not role_dn:
# Get users who have default tenant mapping
for user_dn in tenant[1].get(self.member_attribute, []):
if self._is_dumb_member(user_dn):
continue
res.add(user_dn)
# Get users who are explicitly mapped via a tenant
for rolegrant in rolegrants:
if role_dn is None or rolegrant.role_dn == role_dn:
res.add(rolegrant.user_dn)
return list(res)
def update(self, project_id, values):
old_obj = self.get(project_id)
return super(ProjectApi, self).update(project_id, values, old_obj)
class UserRoleAssociation(object):
"""Role Grant model."""
def __init__(self, user_dn=None, role_dn=None, tenant_dn=None,
*args, **kw):
self.user_dn = user_dn
self.role_dn = role_dn
self.project_dn = tenant_dn
class GroupRoleAssociation(object):
"""Role Grant model."""
def __init__(self, group_dn=None, role_dn=None, tenant_dn=None,
*args, **kw):
self.group_dn = group_dn
self.role_dn = role_dn
self.project_dn = tenant_dn
# TODO(termie): turn this into a data object and move logic to driver
# NOTE(henry-nash): The RoleLdapStructureMixin class enables the sharing of the
# LDAP structure between here and the role backend LDAP; no methods are shared.
class RoleApi(ldap_role.RoleLdapStructureMixin, common_ldap.BaseLdap):
def __init__(self, conf, user_api):
super(RoleApi, self).__init__(conf)
self.member_attribute = (getattr(conf.ldap, 'role_member_attribute')
or self.DEFAULT_MEMBER_ATTRIBUTE)
self._user_api = user_api
def add_user(self, role_id, role_dn, user_dn, user_id, tenant_id=None):
try:
super(RoleApi, self).add_member(user_dn, role_dn)
except exception.Conflict:
msg = (_('User %(user_id)s already has role %(role_id)s in '
'tenant %(tenant_id)s') %
dict(user_id=user_id, role_id=role_id, tenant_id=tenant_id))
raise exception.Conflict(type='role grant', details=msg)
except self.NotFound:
if tenant_id is None or self.get(role_id) is None:
raise Exception(_("Role %s not found") % (role_id,))
attrs = [('objectClass', [self.object_class]),
(self.member_attribute, [user_dn]),
(self.id_attr, [role_id])]
if self.use_dumb_member:
attrs[1][1].append(self.dumb_member)
with self.get_connection() as conn:
conn.add_s(role_dn, attrs)
def delete_user(self, role_dn, user_dn, role_id):
try:
super(RoleApi, self).remove_member(user_dn, role_dn)
except (self.NotFound, ldap.NO_SUCH_ATTRIBUTE):
raise exception.RoleNotFound(message=_(
'Cannot remove role that has not been granted, %s') %
role_id)
def get_role_assignments(self, tenant_dn):
try:
roles = self._ldap_get_list(tenant_dn, ldap.SCOPE_ONELEVEL,
attrlist=[self.member_attribute])
except ldap.NO_SUCH_OBJECT:
roles = []
res = []
for role_dn, attrs in roles:
try:
user_dns = attrs[self.member_attribute]
except KeyError:
continue
for user_dn in user_dns:
if self._is_dumb_member(user_dn):
continue
res.append(UserRoleAssociation(
user_dn=user_dn,
role_dn=role_dn,
tenant_dn=tenant_dn))
return res
def list_global_roles_for_user(self, user_dn):
user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
roles = self.get_all('(%s=%s)' % (self.member_attribute, user_dn_esc))
return [UserRoleAssociation(
role_dn=role.dn,
user_dn=user_dn) for role in roles]
def list_project_roles_for_user(self, user_dn, project_subtree):
try:
roles = self._ldap_get_list(project_subtree, ldap.SCOPE_SUBTREE,
query_params={
self.member_attribute: user_dn},
attrlist=common_ldap.DN_ONLY)
except ldap.NO_SUCH_OBJECT:
roles = []
res = []
for role_dn, _role_attrs in roles:
# ldap.dn.dn2str returns an array, where the first
# element is the first segment.
# For a role assignment, this contains the role ID,
# The remainder is the DN of the tenant.
# role_dn is already utf8 encoded since it came from LDAP.
tenant = ldap.dn.str2dn(role_dn)
tenant.pop(0)
tenant_dn = ldap.dn.dn2str(tenant)
res.append(UserRoleAssociation(
user_dn=user_dn,
role_dn=role_dn,
tenant_dn=tenant_dn))
return res
def list_project_roles_for_group(self, group_dn, project_subtree):
group_dn_esc = ldap.filter.escape_filter_chars(group_dn)
query = '(&(objectClass=%s)(%s=%s))' % (self.object_class,
self.member_attribute,
group_dn_esc)
with self.get_connection() as conn:
try:
roles = conn.search_s(project_subtree,
ldap.SCOPE_SUBTREE,
query,
attrlist=common_ldap.DN_ONLY)
except ldap.NO_SUCH_OBJECT:
# Return no roles rather than raise an exception if the project
# subtree entry doesn't exist because an empty subtree is not
# an error.
return []
res = []
for role_dn, _role_attrs in roles:
# ldap.dn.str2dn returns a list, where the first
# element is the first RDN.
# For a role assignment, this contains the role ID,
# the remainder is the DN of the project.
# role_dn is already utf8 encoded since it came from LDAP.
project = ldap.dn.str2dn(role_dn)
project.pop(0)
project_dn = ldap.dn.dn2str(project)
res.append(GroupRoleAssociation(
group_dn=group_dn,
role_dn=role_dn,
tenant_dn=project_dn))
return res
def roles_delete_subtree_by_project(self, tenant_dn):
self._delete_tree_nodes(tenant_dn, ldap.SCOPE_ONELEVEL)
def roles_delete_subtree_by_role(self, role_id, tree_dn):
self._delete_tree_nodes(tree_dn, ldap.SCOPE_SUBTREE, query_params={
self.id_attr: role_id})
def list_role_assignments(self, project_tree_dn):
"""Returns a list of all the role assignments linked to project_tree_dn
attribute.
"""
try:
roles = self._ldap_get_list(project_tree_dn, ldap.SCOPE_SUBTREE,
attrlist=[self.member_attribute])
except ldap.NO_SUCH_OBJECT:
roles = []
res = []
for role_dn, role in roles:
# role_dn is already utf8 encoded since it came from LDAP.
tenant = ldap.dn.str2dn(role_dn)
tenant.pop(0)
# It obtains the tenant DN to construct the UserRoleAssociation
# object.
tenant_dn = ldap.dn.dn2str(tenant)
for occupant_dn in role[self.member_attribute]:
if self._is_dumb_member(occupant_dn):
continue
if self._user_api.is_user(occupant_dn):
association = UserRoleAssociation(
user_dn=occupant_dn,
role_dn=role_dn,
tenant_dn=tenant_dn)
else:
# occupant_dn is a group.
association = GroupRoleAssociation(
group_dn=occupant_dn,
role_dn=role_dn,
tenant_dn=tenant_dn)
res.append(association)
return res
|
|
# GP4_Table.py : P4 Table Object
#
## @package GP4
#
# This is a table Instance (all Tables are unique)
from GP4_Utilities import *
#from GP4_Parser_Function_Code import *
from GP4_AST_object import AST_object
import GP4_Exceptions
import sys
class Table(AST_object):
## Construct new Table object.
# @param self : object
# @param string : String. Source text
# @param loc : Integer. location in text of this object
# @param name : String. Name of the Table
# @param field_matches : List of Pyparsing field_match objects
# @param action_next_table : List of Pyparsing action_next_table objects
# @param min_size : Integer.
# @param max_size : Integer.
# @returns self
    def __init__(self, string, loc, name, field_matches=None, actions=None,
                       min_size=1, max_size=256 ):
        super(Table, self).__init__(string, loc, 'table')
        self.name = name
        # Avoid mutable default arguments being shared between Table instances.
        self.field_matches = field_matches if field_matches is not None else []
        self.action_next_table = actions if actions is not None else {} # dict maps action name to next_table name
# (or '' if no next_table)
self.min_size = min_size
self.max_size = max_size
# self.size = 0 # Actual size. Not same as number of entries.
self.entries = [] # List of Entry objects
self.num_entries = 0 # Actual number of entries installed.
self.match_key_fun = None # function to construct match key List for this Table
self.default_action = [ 'no_action' ] # [ action_name, [params*] ] Used if no match. Set at run time.
## Set default action
# @param self : table object
# @param action : Pyparsing param list
# @return Bool: Success or Fail
def set_default_action(self, *action):
print "Table:",self.name,"setting default action to:", action[0]
self.default_action = action[0]
return True
## Check self-consistency where possible. More checking is done at run-time.
# @param self : table object
# @param p4 : p4 object
# @return None. Raises runtime error if there is a problem.
def check_self_consistent(self, p4):
for action in self.action_next_table:
if not p4.get_action_by_name(action):
raise GP4_Exceptions.RuntimeError, 'Table "%s" specifies undefined action "%s"' % \
(self.name, action)
nxt_table = self.action_next_table[action] # '' if no next table
if nxt_table != '':
if nxt_table == self.name: # recursion!
raise GP4_Exceptions.RuntimeError, \
'Table "%s" action "%s" specifies self as next_table: recursion is not allowed.' % \
(self.name, action)
if not p4.get_table(nxt_table):
raise GP4_Exceptions.RuntimeError, \
'Table "%s" action "%s" specifies undefined next_table "%s"' % \
(self.name, action, nxt_table)
## Apply this table with a P4 argument as context.
# @param self : table object
# @param p4 : p4 object
# @returns None
def apply( self, p4 ):
print "Applying table", str(self)
idx = -1
if self.num_entries:
match_keys = self.create_match_keys(p4)
if match_keys:
for mk in match_keys: print "match_key=",str(mk)
# get table entry index of first match
idx = self.lookup_match_keys(match_keys)
if idx != -1:
print "Table",self.name,"matched on idx",idx
action_args = self.entries[idx].get_action()
#print "action args:", action_args
if idx == -1: # no match
# choose default action
action_args = self.default_action
#print "No matches. using default action",action_args
if not action_args:
raise GP4_Exceptions.RuntimeError, "Table '%s' has no default action." % self.name
action_name = action_args[0]
action = p4.get_action_by_name(action_name)
if not action:
raise GP4_Exceptions.RuntimeError, "Unknown action '%s'" % action_name
action.execute(p4, *action_args[1:] )
# execute does a non-blocking update to fields so that the effects occur in
# parallel. Now we need to update the real values ready for the next table.
p4.update_modified_fields()
# If we have a next_table defined for this action then we need to execute it.
next_table_name = self.action_next_table.get(action_name)
if next_table_name :
tbl = p4.get_table(next_table_name)
if not tbl:
raise GP4_Exceptions.RuntimeError, "Unknown Table '%s'" % next_table_name
tbl.apply(p4)
## Construct the match key from current header instances.
# @param self : table object
# @param p4 : p4 object
# @returns list of Match_Key objects
def create_match_keys( self, p4 ):
if not self.match_key_fun:
self.match_key_fun = self.compile_match_key_fun(p4)
return self.match_key_fun(p4)
## Compile the self.match_key_fun function
# @param self : table object
# @param p4 : p4 object
# @returns function f(p4): return [ Match_Key ]
def compile_match_key_fun( self, p4 ):
""" The match_key_fun should return a list of Match_Keys based on the
current header fields. An undefined field returns a Match_Key with
a length of zero. i.e.:
match_key_fun(p4) : return [ Match_Key ]
"""
print "compile_match_key_fun"
# If nothing specified in the "reads" expression then return None
if not len( self.field_matches ):
return lambda p4: None
codeL = [ 'def f(p4):', ' match_keyL = []' ]
for fm in self.field_matches:
print ' ',fm
assert len(fm)==2 # 0=[hdr,field,mask] 1=type (lpm, exact, valid, etc)
mask = 0 if len(fm[0])==1 else fm[0][1]
field_ref = fm[0][0]
fm_type = fm[1]
hdr_name, hdr_index, field_name = get_hdr_hdr_index_field_name_from_field_ref(field_ref)
print "hdr:",hdr_name,"hdr_index:",hdr_index,"field:",field_name,"mask:",mask
# if fm_type is valid then we need to construct Match_key using header.
if fm_type == 'valid':
codeL.append( ' hdr_valid = p4.check_hdr_inst_is_valid("%s","%s")' % ( hdr_name, hdr_index ))
codeL.append( ' if hdr_valid: match_key = Match_Key(value = 1, length = 1, valid = True)' )
codeL.append( ' else: match_key = Match_Key(value = 0, length = 1, valid = True)' )
else: # not a 'valid' type. (must be lpm, exact, etc)
codeL.append( ' field = p4.get_field("%s","%s","%s")' % ( hdr_name, hdr_index, field_name ))
codeL.append( ' match_key = field.make_Match_Key("%s") if field else Match_Key()' % fm_type)
if mask:
codeL.append( ' if match_key.valid : match_key.value &= %s' % mask)
codeL.append( ' match_keyL.append(match_key)')
codeL.append(' return match_keyL' )
for l in codeL: print l
code = '\n'.join(codeL)
try:
exec code in globals(), locals()
except Exception as ex_err:
print "Error: generated code for python function yielded exception:",ex_err.data
print "code was <\n",code,"\n>\n"
raise GP4_Exceptions.RuntimeError, ex_err.data
return f
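    # Illustrative note (not part of the original source): for a single exact
    # match on a field such as ipv4.ttl, the generated match-key function
    # produced above looks roughly like:
    #
    #   def f(p4):
    #       match_keyL = []
    #       field = p4.get_field("ipv4","<hdr_index>","ttl")
    #       match_key = field.make_Match_Key("exact") if field else Match_Key()
    #       match_keyL.append(match_key)
    #       return match_keyL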
## Find an entry that matches the match_keys list. Return index or -1 if no match
# @param self : table object
# @param match_keys : [ Match_Key ]
# @returns Integer: index in self.entries or -1 if no match
def lookup_match_keys(self, match_keys):
print "lookup_match_keys(",
for mk in match_keys: print mk,
print ")"
        #fixme - needs to be much more efficient
for (ix,entry) in enumerate(self.entries):
if entry.matches(match_keys):
return ix
return -1
## Check action statement is legal for this table
# @param self : table object
# @param action_stmt : pyparse action_stmt
# @returns Bool: success or Failure
def check_action(self, action_stmt):
print "check_action:", action_stmt
return action_stmt[0] in self.action_next_table.keys()
## Runtime command to add an entry to a table
# @param self : table object
# @param pos : position in table. None = anywhere.
# @param args : Tuple ( entry_list, action_stmt )
# @returns (Bool: success or Failure, Integer: index where entry was added.)
def add_entry( self, pos, *args ):
print "add_entry: pos=",pos,"args=",args
assert len(args)==2
entry_list = args[0] # [ num | (num,num) ]
action_stmt = args[1]
#fixme - check entry not already in table.
# create a new Entry with EntryVals=entry_list and action=action_stmt
entry = Entry()
entry.set_matchList(entry.make_match_vals_from_pyparse(entry_list))
if not self.check_action(action_stmt):
raise GP4_Exceptions.RuntimeError, "Table '%s' cannot do action %s." % (self.name, action_stmt)
entry.set_action(action_stmt)
idx = -1
        # insert_entry_at() handles placement, padding and size checks for both cases.
        if pos == 'any':
            idx = len(self.entries)
            if not self.insert_entry_at(idx, entry): return (False, 0)
        else:
            idx = get_integer(pos)
            if not self.insert_entry_at(idx, entry): return (False, idx)
        print "add_entry: added to idx",idx," Table=",self
        return (True, idx)
## Insert the given Entry at the specified location in self.entries
# @param self : table object
# @param idx : position in self.entries array
# @param entry : Entry
# @returns Bool: success or Failure
    def insert_entry_at( self, idx, entry ):
        if self.num_entries >= self.max_size:
            print "Table",self,"full"
            return False # full - can't add more.
        if idx >= self.max_size:
            print "Error: trying to add entry beyond '%s' table's max size of %d." % \
                  (self.name, self.max_size)
            return False
        if idx < len(self.entries): # ok
            self.entries[idx] = entry
        else:
            # Pad with empty entries up to idx, then place the new entry at idx itself.
            while idx > len(self.entries): self.entries.append(Entry())
            self.entries.append(entry)
        self.num_entries += 1
        return True
def __str__(self):
s = self.name + '() [min=%d max=%d num=%d]\n' % (self.min_size, self.max_size, self.num_entries)
if len(self.field_matches):
s+=' Field matches:'
for el in self.field_matches:
s += str(el) + ';'
s+='\n'
if len(self.action_next_table):
s+=' Actions:\n'
for el in self.action_next_table:
s += " Action '%s' => next table '%s';\n" % (el, self.action_next_table[el])
s+='\n'
return s
|
|
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import exc as sql_exc
from neutron.db import common_db_mixin
from neutron.db.models import allowed_address_pair as addr_pair_models
from neutron.db.models import external_net as external_net_db
from neutron.db.models import l3 as l3_db
from neutron.db.models import securitygroup as securitygroups_db
from neutron.db import models_v2
from neutron.plugins.ml2 import models as ml2_models
from neutron_lib import constants as os_constants
from nuage_neutron.plugins.common import exceptions
from nuage_neutron.plugins.common import nuage_models
from nuage_neutron.plugins.common import utils
def add_net_partition(session, netpart_id,
l3dom_id, l2dom_id,
ent_name, l3isolated,
l3shared):
net_partitioninst = nuage_models.NetPartition(id=netpart_id,
name=ent_name,
l3dom_tmplt_id=l3dom_id,
l2dom_tmplt_id=l2dom_id,
isolated_zone=l3isolated,
shared_zone=l3shared)
session.add(net_partitioninst)
return net_partitioninst
def delete_net_partition(session, net_partition):
session.delete(net_partition)
def delete_net_partition_by_id(session, netpart_id):
query = session.query(nuage_models.NetPartition)
query.filter_by(id=netpart_id).delete()
def get_net_partition_by_name(session, name):
query = session.query(nuage_models.NetPartition)
return query.filter_by(name=name).first()
def get_net_partition_by_id(session, id):
query = session.query(nuage_models.NetPartition)
return query.filter_by(id=id).first()
def get_net_partitions(session, filters=None, fields=None):
query = session.query(nuage_models.NetPartition)
common_db = common_db_mixin.CommonDbMixin()
query = common_db._apply_filters_to_query(query,
nuage_models.NetPartition,
filters)
return query
def get_net_partition_ids(session):
query = session.query(nuage_models.NetPartition.id)
return [netpart[0] for netpart in query]
def get_net_partition_with_lock(session, netpart_id):
query = session.query(nuage_models.NetPartition)
netpart_db = query.filter_by(id=netpart_id).with_lockmode('update').one()
return netpart_db
def get_subnet_with_lock(session, sub_id):
query = session.query(models_v2.Subnet)
subnet_db = query.filter_by(id=sub_id).with_lockmode('update').one()
return subnet_db
def get_router_with_lock(session, router_id):
query = session.query(l3_db.Router)
router_db = query.filter_by(id=router_id).with_lockmode('update').one()
return router_db
def get_secgrp_with_lock(session, secgrp_id):
query = session.query(securitygroups_db.SecurityGroup)
secgrp_db = query.filter_by(id=secgrp_id).with_lockmode('update').one()
return secgrp_db
def get_secgrprule_ids(session):
query = session.query(securitygroups_db.SecurityGroupRule.id)
return [secgrprule[0] for secgrprule in query]
def get_secgrprule_with_lock(session, secgrprule_id):
query = session.query(securitygroups_db.SecurityGroupRule)
secgrprule_db = (query.filter_by(id=secgrprule_id).with_lockmode(
'update').one())
return secgrprule_db
def get_port_with_lock(session, port_id):
query = session.query(models_v2.Port)
port_db = query.filter_by(id=port_id).with_lockmode('update').one()
return port_db
def get_dhcp_port_with_lock(session, net_id):
query = session.query(models_v2.Port)
port_db = query.filter_by(network_id=net_id).filter_by(
device_owner=os_constants.DEVICE_OWNER_DHCP).with_lockmode(
'update').first()
return port_db
def get_fip_with_lock(session, fip_id):
query = session.query(l3_db.FloatingIP)
fip_db = query.filter_by(id=fip_id).with_lockmode('update').one()
return fip_db
def get_fip_by_floating_port_id(session, fixed_port_id):
query = session.query(l3_db.FloatingIP)
return query.filter_by(fixed_port_id=fixed_port_id).first()
def add_entrouter_mapping(session, np_id,
router_id,
n_l3id, rt, rd):
ent_rtr_mapping = nuage_models.NetPartitionRouter(net_partition_id=np_id,
router_id=router_id,
nuage_router_id=n_l3id,
nuage_rtr_rt=rt,
nuage_rtr_rd=rd)
session.add(ent_rtr_mapping)
def update_entrouter_mapping(ent_rtr_mapping, new_dict):
ent_rtr_mapping.update(new_dict)
def add_subnetl2dom_mapping(session, neutron_subnet_id,
nuage_sub_id,
np_id,
ip_version,
l2dom_id=None,
nuage_user_id=None,
nuage_group_id=None,
managed=False):
subnet_l2dom = nuage_models.SubnetL2Domain(subnet_id=neutron_subnet_id,
nuage_subnet_id=nuage_sub_id,
net_partition_id=np_id,
nuage_l2dom_tmplt_id=l2dom_id,
nuage_user_id=nuage_user_id,
nuage_group_id=nuage_group_id,
nuage_managed_subnet=managed,
ip_version=ip_version)
session.add(subnet_l2dom)
return subnet_l2dom
def update_netpartition(net_partition_db, new_values):
net_partition_db.update(new_values)
def update_subnetl2dom_mapping(subnet_l2dom,
new_dict):
subnet_l2dom.update(new_dict)
def get_update_subnetl2dom_mapping(session, new_dict):
subnet_l2dom = get_subnet_l2dom_with_lock(session, new_dict['subnet_id'])
subnet_l2dom.update(new_dict)
def update_entrtr_mapping(ent_rtr, new_dict):
ent_rtr.update(new_dict)
def get_update_entrtr_mapping(session, new_dict):
ent_rtr = get_ent_rtr_mapping_with_lock(session, new_dict['router_id'])
ent_rtr.update(new_dict)
def delete_subnetl2dom_mapping(session, subnet_l2dom):
session.delete(subnet_l2dom)
def get_subnet_l2dom_by_id(session, id):
query = session.query(nuage_models.SubnetL2Domain)
return query.filter_by(subnet_id=id).first()
def get_subnet_l2doms_by_subnet_ids(session, subnet_ids):
return (
session.query(nuage_models.SubnetL2Domain)
.filter(
nuage_models.SubnetL2Domain.subnet_id.in_(subnet_ids)
)).all()
def get_subnet_l2dom_by_port_id(session, port_id):
query = (session.query(nuage_models.SubnetL2Domain)
.join(models_v2.Subnet)
.join(models_v2.IPAllocation)
.filter(models_v2.IPAllocation.port_id == port_id))
try:
return query.one()
except sql_exc.NoResultFound:
raise exceptions.SubnetMappingNotFound(resource='port', id=port_id)
except sql_exc.MultipleResultsFound:
return query.first()
def get_subnet_l2dom_by_network_id(session, network_id):
return (
session.query(nuage_models.SubnetL2Domain)
.join(models_v2.Subnet)
.filter(
models_v2.Subnet.network_id == network_id)
).all()
def get_nuage_subnet_info(session, subnet, fields):
    if not fields or not any(x in fields for x in
                             ['vsd_managed', 'vsd_id',
                              'nuage_net_partition_id']):
return subnet
result = (
session.query(nuage_models.SubnetL2Domain)
.filter(nuage_models.SubnetL2Domain.subnet_id == subnet['id']).first())
subnet['vsd_managed'] = result.nuage_managed_subnet if result else False
subnet['vsd_id'] = result.nuage_subnet_id if result else None
subnet['nuage_net_partition_id'] = (result.net_partition_id
if result else None)
return subnet
def get_nuage_subnets_info(session, subnets, fields, filters):
ids = [subnet['id'] for subnet in subnets]
query = session \
.query(nuage_models.SubnetL2Domain) \
.filter(nuage_models.SubnetL2Domain.subnet_id.in_(ids))
result = query.all()
subnet_id_mapping = dict([(mapping.subnet_id, mapping)
for mapping in result])
filtered = []
for subnet in subnets:
mapping = subnet_id_mapping.get(subnet['id'])
subnet['vsd_managed'] = (mapping.nuage_managed_subnet
if mapping else False)
subnet['vsd_id'] = mapping.nuage_subnet_id if mapping else None
subnet['nuage_net_partition_id'] = (mapping.net_partition_id
if mapping else None)
add = True
if filters:
if 'vsd_managed' in filters.keys():
add = (str(subnet['vsd_managed']).lower() ==
str(filters['vsd_managed'][0]).lower())
if 'vsd_id' in filters.keys():
add = str(subnet['vsd_id']) == str(filters['vsd_id'][0])
if 'nuage_net_partition_id' in filters.keys():
add = (str(subnet['nuage_net_partition_id']) ==
str(filters['nuage_net_partition_id'][0]))
if add:
filtered.append(subnet)
for subnet in filtered:
for field in ['vsd_managed', 'vsd_id', 'nuage_net_partition_id']:
if fields and field not in fields:
del subnet[field]
return filtered
def get_floatingip_per_vip_in_network(session, network_id):
result = (
session.query(l3_db.FloatingIP, models_v2.Port)
.join(
(models_v2.Port,
l3_db.FloatingIP.fixed_port_id == models_v2.Port.id))
.filter(
models_v2.Port.network_id == network_id,
models_v2.Port.device_owner.in_(utils.get_device_owners_vip())
)
).all()
fips_per_vip = {}
for row in result:
fip = row[0]
vip_port = row[1]
for fixed_ip in vip_port.fixed_ips:
fips_per_vip[fixed_ip.ip_address] = fip
return fips_per_vip
def get_subnet_l2dom_by_nuage_id(session, id):
query = session.query(nuage_models.SubnetL2Domain)
return query.filter_by(nuage_subnet_id=str(id)).first()
def get_subnet_l2dom_by_nuage_id_and_ipversion(session, id, ipversion):
query = session.query(nuage_models.SubnetL2Domain)
return query.filter_by(nuage_subnet_id=str(id)).filter_by(
ip_version=str(ipversion)).first()
def get_subnet_l2dom_with_lock(session, id):
query = session.query(nuage_models.SubnetL2Domain)
subl2dom = query.filter_by(subnet_id=id).with_lockmode('update').one()
return subl2dom
def get_ent_rtr_mapping_by_entid(session, entid):
query = session.query(nuage_models.NetPartitionRouter)
return query.filter_by(net_partition_id=entid).all()
def get_ent_l2dom_mapping_by_entid(session, entid):
query = session.query(nuage_models.SubnetL2Domain)
return query.filter_by(net_partition_id=entid).all()
def get_ent_rtr_mapping_by_rtrid(session, rtrid):
query = session.query(nuage_models.NetPartitionRouter)
return query.filter_by(router_id=rtrid).first()
def get_ent_rtr_mapping_by_rtrids(session, rtrids):
if not rtrids:
return []
return (
session.query(nuage_models.NetPartitionRouter)
.filter(
nuage_models.NetPartitionRouter.router_id.in_(rtrids)
)
).all()
def add_network_binding(session, network_id, network_type, physical_network,
vlan_id):
binding = nuage_models.ProviderNetBinding(
network_id=network_id, network_type=network_type,
physical_network=physical_network, vlan_id=vlan_id)
session.add(binding)
return binding
def get_network_binding(session, network_id):
return (session.query(nuage_models.ProviderNetBinding).
filter_by(network_id=network_id).
first())
def get_network_binding_with_lock(session, network_id):
return (session.query(nuage_models.ProviderNetBinding).
filter_by(network_id=network_id).with_lockmode('update').first())
def get_ent_rtr_mapping_with_lock(session, rtrid):
query = session.query(nuage_models.NetPartitionRouter)
entrtr = query.filter_by(router_id=rtrid).with_lockmode('update').one()
return entrtr
def get_ipalloc_for_fip(session, network_id, ip, lock=False):
query = session.query(models_v2.IPAllocation)
if lock:
# Lock is required when the resource is synced
ipalloc_db = (query.filter_by(network_id=network_id).filter_by(
ip_address=ip).with_lockmode('update').one())
else:
ipalloc_db = (query.filter_by(network_id=network_id).filter_by(
ip_address=ip).one())
return make_ipalloc_dict(ipalloc_db)
def get_all_net_partitions(session):
net_partitions = get_net_partitions(session)
return make_net_partition_list(net_partitions)
def get_default_net_partition(context, def_net_part):
net_partition = get_net_partition_by_name(context.session,
def_net_part)
return net_partition
def get_all_routes(session):
routes = session.query(l3_db.RouterRoute)
return make_route_list(routes)
def get_ext_network_ids(session):
query = session.query(external_net_db.ExternalNetwork.network_id)
return [net[0] for net in query]
def get_route_with_lock(session, dest, nhop):
query = session.query(l3_db.RouterRoute)
route_db = (query.filter_by(destination=dest).filter_by(nexthop=nhop)
.with_lockmode('update').one())
return make_route_dict(route_db)
def get_all_provider_nets(session):
provider_nets = session.query(nuage_models.ProviderNetBinding)
return make_provider_net_list(provider_nets)
def make_provider_net_list(provider_nets):
return [make_provider_net_dict(pnet) for pnet in provider_nets]
def make_provider_net_dict(provider_net):
return {'network_id': provider_net['network_id'],
'network_type': provider_net['network_type'],
'physical_network': provider_net['physical_network'],
'vlan_id': provider_net['vlan_id']}
def make_ipalloc_dict(subnet_db):
return {'port_id': subnet_db['port_id'],
'subnet_id': subnet_db['subnet_id'],
'network_id': subnet_db['network_id'],
'ip_address': subnet_db['ip_address']}
def make_net_partition_dict(net_partition):
return {'id': net_partition['id'],
'name': net_partition['name'],
'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'],
'l2dom_tmplt_id': net_partition['l2dom_tmplt_id']}
def make_net_partition_list(net_partitions):
return [make_net_partition_dict(net_partition) for net_partition in
net_partitions]
def make_route_dict(route):
return {'destination': route['destination'],
'nexthop': route['nexthop'],
'router_id': route['router_id']}
def make_route_list(routes):
return [make_route_dict(route) for route in routes]
def make_subnl2dom_dict(subl2dom):
return {'subnet_id': subl2dom['subnet_id'],
'net_partition_id': subl2dom['net_partition_id'],
'nuage_subnet_id': subl2dom['nuage_subnet_id'],
'nuage_l2dom_tmplt_id': subl2dom['nuage_l2dom_tmplt_id'],
'nuage_user_id': subl2dom['nuage_user_id'],
'nuage_group_id': subl2dom['nuage_group_id']}
def make_entrtr_dict(entrtr):
return {'net_partition_id': entrtr['net_partition_id'],
'router_id': entrtr['router_id'],
'nuage_router_id': entrtr['nuage_router_id']}
def count_allowedaddresspairs_for_subnet(session, subnet_id):
return (
session.query(addr_pair_models.AllowedAddressPair)
.join(models_v2.Port)
.join(models_v2.Network)
.join(models_v2.Subnet)
.filter(
models_v2.Subnet.id == subnet_id
)).count()
def get_port_bindings(session, port_ids):
return (
session.query(ml2_models.PortBinding)
.filter(ml2_models.PortBinding.port_id.in_(port_ids))
).all()
|
|
# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Build/install the iree-compiler python package.
# Note that this includes a relatively large build of LLVM (~2400 C++ files)
# and can take a considerable amount of time, especially with defaults.
# To install:
# pip install . --use-feature=in-tree-build
# To build a wheel:
# pip wheel . --use-feature=in-tree-build
#
# It is recommended to build with Ninja and ccache. To do so, set environment
# variables by prefixing to above invocations:
# CMAKE_C_COMPILER_LAUNCHER=ccache CMAKE_CXX_COMPILER_LAUNCHER=ccache
#
# On CIs, it is often advantageous to re-use/control the CMake build directory.
# This can be set with the IREE_COMPILER_API_CMAKE_BUILD_DIR env var.
import json
import os
import platform
import re
import shutil
import subprocess
import sys
import sysconfig
from distutils.command.build import build as _build
from setuptools import find_namespace_packages, setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.build_py import build_py as _build_py
# Setup and get version information.
THIS_DIR = os.path.realpath(os.path.dirname(__file__))
IREESRC_DIR = os.path.join(THIS_DIR, "..", "..")
VERSION_INFO_FILE = os.path.join(IREESRC_DIR, "version_info.json")
def load_version_info():
with open(VERSION_INFO_FILE, "rt") as f:
return json.load(f)
try:
version_info = load_version_info()
except FileNotFoundError:
print("version_info.json not found. Using defaults")
version_info = {}
PACKAGE_SUFFIX = version_info.get("package-suffix") or ""
PACKAGE_VERSION = version_info.get("package-version") or "0.1dev1"
class CustomBuild(_build):
def run(self):
self.run_command("build_py")
self.run_command("build_ext")
self.run_command("build_scripts")
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuildPy(_build_py):
def run(self):
version_py_content = generate_version_py()
print(f"Generating version.py:\n{version_py_content}", file=sys.stderr)
subprocess.check_call(["cmake", "--version"])
target_dir = os.path.abspath(self.build_lib)
print(f"Building in target dir: {target_dir}", file=sys.stderr)
os.makedirs(target_dir, exist_ok=True)
cmake_build_dir = os.getenv("IREE_COMPILER_API_CMAKE_BUILD_DIR")
if not cmake_build_dir:
cmake_build_dir = os.path.join(target_dir, "..", "cmake_build")
os.makedirs(cmake_build_dir, exist_ok=True)
cmake_build_dir = os.path.abspath(cmake_build_dir)
print(f"CMake build dir: {cmake_build_dir}", file=sys.stderr)
cmake_install_dir = os.path.abspath(
os.path.join(target_dir, "..", "cmake_install"))
print(f"CMake install dir: {cmake_install_dir}", file=sys.stderr)
src_dir = os.path.abspath(os.path.dirname(__file__))
cfg = "Release"
cmake_args = [
"-GNinja",
"--log-level=VERBOSE",
"-DCMAKE_INSTALL_PREFIX={}".format(cmake_install_dir),
"-DPython3_EXECUTABLE={}".format(sys.executable),
"-DPython3_INCLUDE_DIRS={}".format(sysconfig.get_path("include")),
"-DIREE_VERSION_INFO={}".format(self.distribution.get_version()),
"-DCMAKE_BUILD_TYPE={}".format(cfg),
]
# Enable CUDA if specified.
cuda_target_option = os.getenv("IREE_TARGET_BACKEND_CUDA")
if cuda_target_option:
cmake_args.append(f"-DIREE_TARGET_BACKEND_CUDA={cuda_target_option}")
build_args = []
if os.path.exists(cmake_install_dir):
shutil.rmtree(cmake_install_dir)
cmake_cache_file = os.path.join(cmake_build_dir, "CMakeCache.txt")
if os.path.exists(cmake_cache_file):
os.remove(cmake_cache_file)
install_target = "install/strip"
if platform.system() == "Windows":
install_target = "install"
print(f"Configuring with: {cmake_args}", file=sys.stderr)
subprocess.check_call(["cmake", src_dir] + cmake_args, cwd=cmake_build_dir)
subprocess.check_call(
["cmake", "--build", ".", "--target", install_target] + build_args,
cwd=cmake_build_dir)
print("Build complete.", file=sys.stderr)
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
print("Copying install to target.", file=sys.stderr)
shutil.copytree(os.path.join(cmake_install_dir, "python_package"),
target_dir,
symlinks=False)
print("Target populated.", file=sys.stderr)
# Write version.py.
version_py_file = os.path.join(target_dir, "iree", "compiler", "version.py")
os.makedirs(os.path.dirname(version_py_file), exist_ok=True)
with open(version_py_file, "wt") as f:
f.write(version_py_content)
class NoopBuildExtension(_build_ext):
def __init__(self, *args, **kwargs):
assert False
def build_extension(self, ext):
pass
def generate_version_py():
return f"""# Auto-generated version info.
PACKAGE_SUFFIX = "{PACKAGE_SUFFIX}"
VERSION = "{PACKAGE_VERSION}"
REVISIONS = {json.dumps(find_git_versions())}
"""
def find_git_versions():
revisions = {}
try:
revisions["IREE"] = subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=IREESRC_DIR).decode("utf-8").strip()
except subprocess.SubprocessError as e:
print(f"ERROR: Could not get IREE revision: {e}", file=sys.stderr)
revisions["LLVM_PROJECT"] = find_git_submodule_revision(
"third_party/llvm-project")
revisions["TENSORFLOW"] = find_git_submodule_revision(
"third_party/tensorflow")
revisions["MLIR_HLO"] = find_git_submodule_revision("third_party/mlir-hlo")
return revisions
def find_git_submodule_revision(submodule_path):
try:
data = subprocess.check_output(["git", "ls-tree", "HEAD", submodule_path],
cwd=IREESRC_DIR).decode("utf-8").strip()
columns = re.split("\\s+", data)
return columns[2]
except Exception as e:
print(
f"ERROR: Could not get submodule revision for {submodule_path}"
f" ({e})",
file=sys.stderr)
return ""
setup(
name=f"iree-compiler{PACKAGE_SUFFIX}",
version=f"{PACKAGE_VERSION}",
author="IREE Authors",
author_email="[email protected]",
description="IREE Compiler API",
long_description="",
license="Apache-2.0",
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
ext_modules=[
CMakeExtension("iree.compiler._mlir_libs._mlir"),
CMakeExtension("iree.compiler._mlir_libs._ireeDialects"),
CMakeExtension("iree.compiler._mlir_libs._ireecTransforms"),
CMakeExtension("iree.compiler._mlir_libs._mlirHlo"),
CMakeExtension("iree.compiler._mlir_libs._mlirLinalgPasses"),
],
cmdclass={
"build": CustomBuild,
"built_ext": NoopBuildExtension,
"build_py": CMakeBuildPy,
},
zip_safe=False,
packages=find_namespace_packages(include=[
"iree.compiler",
"iree.compiler.*",
],),
entry_points={
"console_scripts": [
"ireec = iree.compiler.tools.scripts.ireec.__main__:main",
# Transitional note: iree-translate resolves to ireec.
"iree-translate = iree.compiler.tools.scripts.ireec.__main__:main",
],
},
install_requires=[
"numpy",
"PyYAML",
],
)
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.tests.menu_page_viewperm import ViewPermissionTests
from cms.compat import get_user_model
__all__ = [
'ViewPermissionComplexMenuStaffNodeTests',
]
class ViewPermissionComplexMenuStaffNodeTests(ViewPermissionTests):
"""
Test CMS_PUBLIC_FOR=staff group access and menu nodes rendering
"""
settings_overrides = {
'CMS_PERMISSION': True,
'CMS_PUBLIC_FOR': 'staff',
}
def test_public_pages_anonymous_norestrictions(self):
"""
All pages are INVISIBLE to an anonymous user
"""
all_pages = self._setup_tree_pages()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_public_menu_anonymous_user(self):
"""
        Anonymous users see nothing, as they are not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_node_staff_access_page_and_children_group_1(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1')
# user 1 is member of group_b_access_page_and_children
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_1')
urls = self.get_url_dict(all_pages)
# call /
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_children_group_1_no_staff(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_1_nostaff')
urls = self.get_url_dict(all_pages)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_children_group_2(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
to page_b_b and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_2')
urls = self.get_url_dict(all_pages)
self.assertViewNotAllowed(urls['/en/page_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_a/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/'], user)
self.assertViewNotAllowed(urls['/en/page_d/'], user)
self.assertViewAllowed(urls['/en/page_d/page_d_a/'], user)
#
def test_node_staff_access_children_group_2_nostaff(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
        to page_b_b and the user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b_b_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_2_nostaff')
urls = self.get_url_dict(all_pages)
# member of group that has access to this page
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_descendants_group_3(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_3')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_page_and_descendants_group_3_nostaff(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_3_nostaff')
urls = self.get_url_dict(all_pages)
# call /
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_descendants_group_4(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_4')
urls = self.get_url_dict(all_pages)
# call /
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
# not a direct child
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_descendants_group_4_nostaff(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
        user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_4_nostaff')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_page_group_5(self):
"""
        simulate behaviour of group 5 member
group_d_ACCESS_PAGE to page_d
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_c',
'page_c_a',
'page_c_b',
'page_d',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_5')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_5')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_group_5_nostaff(self):
"""
        simulate behaviour of group 5 member
        group_d_ACCESS_PAGE to page_d
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_d',]
self.assertGrantedVisibility(all_pages, granted, username='user_5_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='[email protected]')
else:
user = get_user_model().objects.get(username='user_5_nostaff')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
|
|
import math
class Fraction(object):
precision = 16
def __init__(self, numerator, denominator=1):
"""
:type numerator: int, float
:type denominator: int
"""
if isinstance(numerator, float) and denominator == 1:
self.numerator = Fraction.from_float(numerator).numerator
self.denominator = Fraction.from_float(numerator).denominator
elif isinstance(numerator, int) and isinstance(denominator, int):
if denominator == 0:
raise ZeroDivisionError
x = math.gcd(numerator, denominator)
self.numerator = numerator//x
self.denominator = denominator//x
            if self.denominator < 0:
                # Keep the sign on the numerator; also covers a zero numerator
                # paired with a negative denominator.
                self.numerator = -self.numerator
                self.denominator = -self.denominator
else:
raise ValueError("expected <class 'int'>")
def __str__(self):
if self.denominator == 1:
return "Fraction(" + str(self.numerator) + ')'
string = "Fraction(" + '/'.join([str(self.numerator), str(self.denominator)]) + ')'
return string
__repr__ = __str__
def __bool__(self):
if self == Fraction(0):
return False
else:
return True
    def __eq__(self, other):
        if not isinstance(other, Fraction):
            other = Fraction(other)
        return (self.numerator == other.numerator and
                self.denominator == other.denominator)
def __ne__(self, other):
if self == other:
return False
else:
return True
def __lt__(self, other):
if (self - other).numerator*(self - other).denominator < 0:
return True
else:
return False
def __le__(self, other):
"""
:type other: Fraction
"""
if (self - other).numerator*(self - other).denominator <= 0:
return True
else:
return False
def __gt__(self, other):
"""
:type other: Fraction
"""
if self <= other:
return False
else:
return True
def __ge__(self, other):
"""
:type other: Fraction
"""
if self < other:
return False
else:
return True
def __pos__(self):
return self
def __neg__(self):
return Fraction(-1)*self
def __abs__(self):
a = abs(self.numerator)
b = abs(self.denominator)
return Fraction(a, b)
def __add__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
a = self.numerator*other.denominator + other.numerator*self.denominator
b = self.denominator*other.denominator
return Fraction(a, b)
__radd__ = __add__
__iadd__ = __add__
def __sub__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
return self + (-other)
def __rsub__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
return -self + other
__isub__ = __sub__
def __mul__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
a = self.numerator*other.numerator
b = self.denominator*other.denominator
return Fraction(a, b)
__rmul__ = __mul__
__imul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
a = other.numerator
b = other.denominator
return self*Fraction(b, a)
def __rtruediv__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
return other*Fraction(self.denominator, self.numerator)
__itruediv__ = __truediv__
def __floordiv__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
return int(self/other)
def __rfloordiv__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
return int(other/self)
__ifloordiv__ = __floordiv__
def __mod__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
return Fraction.from_float(float(self - self//other*other))
def __rmod__(self, other):
if not isinstance(other, Fraction):
other = Fraction(other)
return Fraction.from_float(float(other - other//self*self))
__imod__ = __mod__
    def __floor__(self):
        # True floor; the denominator is normalised to be positive in __init__.
        return self.numerator // self.denominator
    def __ceil__(self):
        # Ceiling via negated floor division (exact for whole numbers too).
        return -((-self.numerator) // self.denominator)
def __pow__(self, power, modulo=None):
if not isinstance(power, int):
raise ValueError("expected <class 'int'> got %s instead" % type(power))
if power >= 0:
return Fraction(self.numerator**power, self.denominator**power)
else:
return Fraction(self.denominator**(-power), self.numerator**(-power))
__ipow__ = __pow__
def __int__(self):
return int(self._to_float())
def __float__(self):
return self._to_float()
def __copy__(self):
cls = type(self)
return cls(self.numerator, self.denominator)
def copy(self):
return self.__copy__()
@staticmethod
def set_precision(n):
"""
:type n: int
"""
Fraction.precision = n
def _to_float(self):
x = abs(self.numerator)
y = self.denominator
i, x = divmod(x, y)
x *= 10**(Fraction.precision + len(str(y)) - 1)
f, x = divmod(x, y)
if self.numerator >= 0:
return float(str(i) + '.' + str(f))
else:
return float('-' + str(i) + '.' + str(f))
@classmethod
def from_float(cls, num):
"""
:type num: float
"""
if not isinstance(num, float):
if isinstance(num, int):
return cls(num)
else:
raise ValueError("expected <class 'float'> got %s instead" % type(num))
n = str(num)
prec = len(n.split('.')[1])
n = int(float(n)*10**prec)
return cls(n, 10**prec)
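# Illustrative usage sketch (not part of the original module): exercises the
# arithmetic, comparison and float conversion defined above.
if __name__ == "__main__":
    a = Fraction(3, 4)
    b = Fraction.from_float(0.5)                # Fraction(1/2)
    print(a + b)                                # Fraction(5/4)
    print(a * 2)                                # Fraction(3/2); __mul__ coerces the int
    print(a > b)                                # True
    print(float(Fraction(1, 3)))                # 0.333... at the configured precision
    print(Fraction(7, 2) // Fraction(1, 2))     # 7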
|
|
import cProfile
import json
import argparse
import glob
import collections
import itertools
import cv2
import numpy as np
import os
def region_of_interest(img, vertices):
mask = np.zeros_like(img)
if len(img.shape) > 2:
channel_count = img.shape[2]
select_mask_color = (255,) * channel_count
else:
select_mask_color = 255
cv2.fillPoly(mask, vertices, select_mask_color)
return cv2.bitwise_and(img, mask)
def sliding_window_search(nonzeroy, nonzerox, y_size, base, search_img):
nwindows = 8
    window_height = y_size // nwindows  # np.int is removed in modern NumPy
current = base
margin = 40
minpix = 10
lane_inds = []
for window in range(nwindows):
win_y_low = y_size - (window + 1) * window_height
win_y_high = y_size - window * window_height
win_x_low = current - margin
win_x_high = current + margin
good_inds_mask = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high)
& (nonzerox >= win_x_low) & (nonzerox < win_x_high))
good_inds = good_inds_mask.nonzero()[0]
lane_inds.append(good_inds)
if len(good_inds) > minpix:
            current = int(np.mean(nonzerox[good_inds]))
if search_img is not None:
cv2.rectangle(search_img, (win_x_low, win_y_low), (win_x_high, win_y_high), 255, 2)
lane_inds = np.concatenate(lane_inds)
return nonzeroy[lane_inds], nonzerox[lane_inds]
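# Illustrative sketch (not part of the original pipeline; the helper name is
# hypothetical): running the sliding window search on a synthetic warped binary
# image containing a single vertical "lane line".
def _demo_sliding_window_search():
    y_size, x_size = 40, 160
    synthetic = np.zeros((y_size, x_size), dtype=np.uint8)
    synthetic[:, 80] = 1                      # vertical line at x = 80
    nonzero = synthetic.nonzero()
    nonzeroy, nonzerox = np.array(nonzero[0]), np.array(nonzero[1])
    histogram = np.sum(synthetic[20:, :], axis=0)
    base = np.argmax(histogram)
    liney, linex = sliding_window_search(nonzeroy, nonzerox, y_size, base, None)
    return np.polyfit(liney, linex, 1)        # expect slope ~0, intercept ~80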
def crop_image(input_img):
y_size = input_img.shape[0]
if input_img.shape[0] != 128:
raise Exception("expected y dimension 128 but got: %d" % y_size)
cropped = input_img[88:, :, :]
assert cropped.shape[0] == 40
assert cropped.shape[1] == 160
return cropped
PipelineResult = collections.namedtuple("PipelineResult",
["input_img", "cropped",
"sobel_h_x", "sobel_h_y", "sobel_h_mag",
"sobel_h_mag_scaled", "sobel_h_thresholded",
"sobel_s_thresholded",
"l_threshold_mask",
"yellow_mask",
"binary",
"search_img",
"hls", "blurred_s",
"sobel_s_mag_scaled",
"blurred_h",
"warped_input",
"warped", "num_pts", "linex", "liney",
"line_fit"])
def get_yellow_mask(hls):
lower_yellow = (21.25, 75, 40)
# lower_yellow = (21.25, 75, 100.0)
upper_yellow = (50, 178.5, 255)
mask = cv2.inRange(hls, lower_yellow, upper_yellow)
return mask
def get_ground_polygon():
d1 = (0, 40) # bottom left of polygon
d2 = (62, 0) # top left of polygon
d3 = (98, 0) # top right of polygon
d4 = (160, 40) # bottom right of polygon
return np.int32([d1, d2, d3, d4])
def _perspective_mat_dst():
d1 = (40, 40) # bottom mid-left
d2 = (40, 0) # top mid-left
d3 = (120, 0) # top mid-right
d4 = (120, 40) # bottom mid-right
return np.float32([d1, d2, d3, d4])
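# Note (added for clarity): the ground trapezoid from get_ground_polygon() is
# mapped onto the rectangle above by cv2.getPerspectiveTransform in Processor
# below, producing a bird's-eye view of the 160x40 cropped road region.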
class Processor(object):
def __init__(self, mtx, dist):
self.mtx = mtx
self.dist = dist
src = get_ground_polygon().astype(np.float32)
dst = _perspective_mat_dst()
self.perspective_mat = cv2.getPerspectiveTransform(src, dst)
self.perspective_mat_inv = cv2.getPerspectiveTransform(dst, src)
def undistort(self, cropped):
if self.mtx is not None:
            return cv2.undistort(cropped, self.mtx, self.dist, None, self.mtx)
else:
return cropped
def inv_warp(self, img):
return cv2.warpPerspective(
img, self.perspective_mat_inv, (160, 40), flags=cv2.INTER_LINEAR)
def warp(self, img):
# copy = np.copy(img)
# cv2.polylines(copy, [get_ground_polygon()], True, [255, 0, 255], 5)
return cv2.warpPerspective(
img, self.perspective_mat, (160, 40), flags=cv2.INTER_LINEAR)
def process(self, input_img, debug=False):
cropped = crop_image(input_img)
y_size = 40
x_size = 160
assert cropped.shape[0] == y_size
assert cropped.shape[1] == x_size
undistorted = self.undistort(cropped)
        hls = cv2.cvtColor(undistorted, cv2.COLOR_BGR2HLS)  # use the undistorted frame
h_channel = hls[:, :, 0]
l_channel = hls[:, :, 1]
s_channel = hls[:, :, 2]
blurred_s = cv2.GaussianBlur(s_channel, (5, 5), 0)
blurred_h = cv2.GaussianBlur(h_channel, (5, 5), 0)
yellow_mask = get_yellow_mask(hls)
l_threshold_mask = cv2.inRange(l_channel, 50, 255)
sobel_h_x = cv2.Sobel(blurred_h, cv2.CV_64F, 1, 0, ksize=5)
sobel_h_y = cv2.Sobel(blurred_h, cv2.CV_64F, 0, 1, ksize=5)
sobel_h_mag = np.sqrt(sobel_h_x ** 2 + sobel_h_y ** 2)
sobel_h_mag = cv2.bitwise_and(sobel_h_mag, sobel_h_mag, mask=yellow_mask)
sobel_h_scale_factor = np.max(sobel_h_mag) / 255
if sobel_h_scale_factor > 0:
sobel_h_mag_scaled = sobel_h_mag / sobel_h_scale_factor
else:
sobel_h_mag_scaled = sobel_h_mag
sobel_h_threshold_mask = cv2.inRange(sobel_h_mag_scaled, 50, 255)
sobel_h_thresholded = cv2.bitwise_and(sobel_h_mag_scaled, sobel_h_mag_scaled, mask=sobel_h_threshold_mask)
sobel_s_x = cv2.Sobel(blurred_s, cv2.CV_64F, 1, 0, ksize=5)
sobel_s_y = cv2.Sobel(blurred_s, cv2.CV_64F, 0, 1, ksize=5)
sobel_s_mag = np.sqrt(sobel_s_x ** 2 + sobel_s_y ** 2)
sobel_s_mag = cv2.bitwise_and(sobel_s_mag, sobel_s_mag, mask=yellow_mask)
sobel_s_scale_factor = np.max(sobel_s_mag) / 255
if sobel_s_scale_factor > 0:
sobel_s_mag_scaled = sobel_s_mag / sobel_s_scale_factor
else:
sobel_s_mag_scaled = sobel_s_mag
sobel_s_threshold_mask = cv2.inRange(sobel_s_mag_scaled, 50, 255)
sobel_s_thresholded = cv2.bitwise_and(sobel_s_mag_scaled, sobel_s_mag_scaled, mask=sobel_s_threshold_mask)
binary = np.ones_like(s_channel)
binary = cv2.bitwise_and(binary, binary, mask=sobel_h_threshold_mask)
binary = cv2.bitwise_and(binary, binary, mask=sobel_s_threshold_mask)
if debug:
warped_input = self.warp(cropped)
else:
warped_input = None
warped_binary = self.warp(binary)
histogram = np.sum(warped_binary[20:, :], axis=0)
nonzero = warped_binary.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
x_base = np.argmax(histogram)
if debug:
search_img = np.zeros_like(warped_binary)
else:
search_img = None
liney, linex = sliding_window_search(nonzeroy, nonzerox, y_size, x_base, search_img)
num_pts = len(linex)
if num_pts >= 10:
line_fit = np.polyfit(liney, linex, 1)
else:
line_fit = None
return PipelineResult(
input_img=input_img,
yellow_mask=yellow_mask,
sobel_h_x=sobel_h_x,
l_threshold_mask=l_threshold_mask,
sobel_h_y=sobel_h_y,
sobel_h_mag=sobel_h_mag,
sobel_h_mag_scaled=sobel_h_mag_scaled,
sobel_s_mag_scaled=sobel_s_mag_scaled,
sobel_h_thresholded=sobel_h_thresholded,
sobel_s_thresholded=sobel_s_thresholded,
cropped=cropped,
hls=hls,
search_img=search_img,
binary=binary,
blurred_s=blurred_s,
blurred_h=blurred_h,
warped_input=warped_input,
warped=warped_binary,
liney=liney,
linex=linex,
line_fit=line_fit,
num_pts=num_pts)
def handle_filepath(args, processor, filepath, report_file):
bgr_image = load_bgr_image(filepath)
result = processor.process(bgr_image)
if report_file is not None:
doc = {}
doc['image'] = filepath
if result.line_fit is None:
doc['fit'] = False
else:
doc['fit'] = True
doc['weight'] = result.num_pts
doc['c0'] = result.line_fit[0]
doc['c1'] = result.line_fit[1]
json.dump(doc, report_file)
report_file.write("\n")
report_file.flush()
def load_bgr_image(bgr_filepath):
bgr_array = np.fromfile(bgr_filepath, dtype=np.uint8)
bgr_image = bgr_array.reshape(128, 160, 3)
return bgr_image
def run3(args, processor, report_file):
if args.imgfile is not None:
with open(args.imgfile, "r") as infile:
while True:
line = infile.readline()
                if not line:  # readline() returns '' at EOF, never None
break
filepath = line.rstrip()
handle_filepath(args, processor, filepath, report_file)
else:
bgr_filepaths = list(glob.glob(os.path.join(args.imgdir, "*.bgr")))
if args.ntake is not None:
bgr_filepaths = take(args.ntake, bgr_filepaths)
for filepath in bgr_filepaths:
handle_filepath(args, processor, filepath, report_file)
def run2(args, processor):
run3(args, processor, args.report)
def run1(args):
if args.calibration is not None:
with open(args.calibration, "r") as infile:
doc = json.load(infile)
mtx = np.array(doc['mtx'])
dist = np.array(doc['dist'])
processor = Processor(mtx, dist)
else:
processor = Processor(None, None)
run2(args, processor)
def take(n, iterable):
"Return first n items of the iterable as a list"
return list(itertools.islice(iterable, n))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--demo", action="store_true")
parser.add_argument("--imgdir", default="/dev/shm/bgr")
parser.add_argument("--ntake", default=None, type=int)
parser.add_argument("--imgfile")
parser.add_argument("--report", type=argparse.FileType('w'))
parser.add_argument("--calibration")
args = parser.parse_args()
return args
def demo():
args = parse_args()
run1(args)
def main():
args = parse_args()
if args.demo:
cProfile.run('demo()', sort='cumulative')
else:
run1(args)
if __name__ == "__main__":
main()
|
|
from __future__ import absolute_import, unicode_literals
from django import forms
from django.forms.formsets import BaseFormSet, DELETION_FIELD_NAME
from django.forms.util import ErrorDict, ErrorList
from django.forms.models import modelform_factory, inlineformset_factory, modelformset_factory, BaseModelFormSet
from django.test import TestCase
from django.utils import six
from .models import User, UserSite, Restaurant, Manager, Network, Host
class InlineFormsetTests(TestCase):
def test_formset_over_to_field(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #10243"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=User())
# Now create a new User and UserSite instance
data = {
'serial': '1',
'username': 'apollo13',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '0',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-data': '10',
'usersite_set-0-user': 'apollo13'
}
user = User()
form = Form(data)
if form.is_valid():
user = form.save()
else:
self.fail('Errors found on form:%s' % form_set)
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 10)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '2',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13',
'usersite_set-1-data': '42',
'usersite_set-1-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values().order_by('data')
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
self.assertEqual(usersite[1]['data'], 42)
self.assertEqual(usersite[1]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_over_inherited_model(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #11120"
Form = modelform_factory(Restaurant, fields="__all__")
FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
'name': "Guido's House of Pasta",
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '0',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-name': 'Guido Van Rossum'
}
        restaurant = Restaurant()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
self.fail('Errors found on form:%s' % form_set)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Guido Van Rossum')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the Manager instance
data = {
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new Manager instance
data = {
'manager_set-TOTAL_FORMS': '2',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam',
'manager_set-1-name': 'John Cleese'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values().order_by('name')
self.assertEqual(manager[0]['name'], 'John Cleese')
self.assertEqual(manager[1]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
form = Form(instance=None)
formset = FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"No fields passed to modelformset_factory should result in no fields on returned forms except for the id. See #14119."
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertTrue('id' in form.fields)
self.assertEqual(len(form.fields), 1)
def test_save_as_new_with_new_inlines(self):
"""
Existing and new inlines are saved with save_as_new.
Regression for #14938.
"""
efnet = Network.objects.create(name="EFNet")
host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
HostFormSet = inlineformset_factory(Network, Host, fields="__all__")
# Add a new host, modify previous host, and save-as-new
data = {
'host_set-TOTAL_FORMS': '2',
'host_set-INITIAL_FORMS': '1',
'host_set-MAX_NUM_FORMS': '0',
'host_set-0-id': six.text_type(host1.id),
'host_set-0-hostname': 'tranquility.hub.dal.net',
'host_set-1-hostname': 'matrix.de.eu.dal.net'
}
# To save a formset as new, it needs a new hub instance
dalnet = Network.objects.create(name="DALnet")
formset = HostFormSet(data, instance=dalnet, save_as_new=True)
self.assertTrue(formset.is_valid())
formset.save()
self.assertQuerysetEqual(
dalnet.host_set.order_by("hostname"),
["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"]
)
def test_initial_data(self):
user = User.objects.create(username="bibi", serial=1)
UserSite.objects.create(user=user, data=7)
FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__")
formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}])
self.assertEqual(formset.forms[0].initial['data'], 7)
self.assertEqual(formset.extra_forms[0].initial['data'], 41)
self.assertTrue('value="42"' in formset.extra_forms[1].as_p())
class FormsetTests(TestCase):
def test_error_class(self):
'''
Test the type of Formset and Form error attributes
'''
Formset = modelformset_factory(User, fields="__all__")
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-id': '',
'form-0-username': 'apollo13',
'form-0-serial': '1',
'form-1-id': '',
'form-1-username': 'apollo13',
'form-1-serial': '2',
}
formset = Formset(data)
# check if the returned error classes are correct
# note: formset.errors returns a list as documented
self.assertIsInstance(formset.errors, list)
self.assertIsInstance(formset.non_form_errors(), ErrorList)
for form in formset.forms:
self.assertIsInstance(form.errors, ErrorDict)
self.assertIsInstance(form.non_field_errors(), ErrorList)
def test_initial_data(self):
User.objects.create(username="bibi", serial=1)
Formset = modelformset_factory(User, fields="__all__", extra=2)
formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}])
self.assertEqual(formset.forms[0].initial['username'], "bibi")
self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11")
self.assertTrue('value="apollo12"' in formset.extra_forms[1].as_p())
def test_extraneous_query_is_not_run(self):
Formset = modelformset_factory(Network, fields="__all__")
data = {'test-TOTAL_FORMS': '1',
'test-INITIAL_FORMS': '0',
'test-MAX_NUM_FORMS': '',
'test-0-name': 'Random Place', }
with self.assertNumQueries(1):
formset = Formset(data, prefix="test")
formset.save()
class CustomWidget(forms.widgets.TextInput):
pass
class UserSiteForm(forms.ModelForm):
class Meta:
model = UserSite
fields = "__all__"
widgets = {
'id': CustomWidget,
'data': CustomWidget,
}
localized_fields = ('data',)
class Callback(object):
def __init__(self):
self.log = []
def __call__(self, db_field, **kwargs):
self.log.append((db_field, kwargs))
return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
"""
Regression for #13095 and #17683: Using base forms with widgets
defined in Meta should not raise errors and BaseModelForm should respect
the specified pk widget.
"""
def test_inlineformset_factory_default(self):
Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__")
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def test_modelformset_factory_default(self):
Formset = modelformset_factory(UserSite, form=UserSiteForm)
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def assertCallbackCalled(self, callback):
id_field, user_field, data_field = UserSite._meta.fields
expected_log = [
(id_field, {'widget': CustomWidget}),
(user_field, {}),
(data_field, {'widget': CustomWidget, 'localize': True}),
]
self.assertEqual(callback.log, expected_log)
def test_inlineformset_custom_callback(self):
callback = Callback()
inlineformset_factory(User, UserSite, form=UserSiteForm,
formfield_callback=callback, fields="__all__")
self.assertCallbackCalled(callback)
def test_modelformset_custom_callback(self):
callback = Callback()
modelformset_factory(UserSite, form=UserSiteForm,
formfield_callback=callback)
self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
"""
A formset mix-in that lets a form decide if it's to be deleted.
Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.
form.should_delete() is called. The formset delete field is also suppressed.
"""
def add_fields(self, form, index):
super(BaseCustomDeleteFormSet, self).add_fields(form, index)
self.can_delete = True
if DELETION_FIELD_NAME in form.fields:
del form.fields[DELETION_FIELD_NAME]
def _should_delete_form(self, form):
return hasattr(form, 'should_delete') and form.should_delete()
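# Illustrative sketch (not part of the original tests): the mix-in above can
# also be paired with a plain (non-model) formset. The form class, its
# should_delete() rule and the formset_factory import are hypothetical
# additions for demonstration only.
from django.forms.formsets import formset_factory
class _ExampleDeletableForm(forms.Form):
    name = forms.CharField()
    def should_delete(self):
        # Assumes the formset has been validated so cleaned_data exists.
        return self.cleaned_data.get("name", "").startswith("tmp-")
_ExampleDeletableFormSet = formset_factory(_ExampleDeletableForm,
                                           formset=BaseCustomDeleteFormSet)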
class FormfieldShouldDeleteFormTests(TestCase):
"""
Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form
"""
class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
""" Model FormSet with CustomDelete MixIn """
class CustomDeleteUserForm(forms.ModelForm):
""" A model form with a 'should_delete' method """
class Meta:
model = User
fields = "__all__"
def should_delete(self):
""" delete form if odd PK """
return self.instance.pk % 2 != 0
NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True)
DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet)
data = {
'form-TOTAL_FORMS': '4',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '4',
'form-0-username': 'John',
'form-0-serial': '1',
'form-1-username': 'Paul',
'form-1-serial': '2',
'form-2-username': 'George',
'form-2-serial': '3',
'form-3-username': 'Ringo',
'form-3-serial': '5',
}
delete_all_ids = {
'form-0-DELETE': '1',
'form-1-DELETE': '1',
'form-2-DELETE': '1',
'form-3-DELETE': '1',
}
def test_init_database(self):
""" Add test data to database via formset """
formset = self.NormalFormset(self.data)
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 4)
def test_no_delete(self):
""" Verify base formset doesn't modify database """
# reload database
self.test_init_database()
# pass standard data dict & see none updated
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
for i,user in enumerate(User.objects.all())
))
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 4)
def test_all_delete(self):
""" Verify base formset honors DELETE field """
# reload database
self.test_init_database()
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
for i,user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 0)
def test_custom_delete(self):
""" Verify DeleteFormset ignores DELETE field and uses form method """
# reload database
self.test_init_database()
# Create formset with custom Delete function
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
            for i, user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.DeleteFormset(data, queryset=User.objects.all())
# verify two were deleted
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 2)
# verify no "odd" PKs left
odd_ids = [user.pk for user in User.objects.all() if user.pk % 2]
self.assertEqual(len(odd_ids), 0)
|
|
import struct
import unittest
import zstandard as zstd
def multithreaded_chunk_size(level, source_size=0):
    """Return four times the window size for *level*; used to size inputs
    for the multithreaded compression tests."""
    params = zstd.ZstdCompressionParameters.from_level(
        level, source_size=source_size
    )
    return 1 << (params.window_log + 2)
class TestCompressor_compress(unittest.TestCase):
def test_compress_empty(self):
cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
result = cctx.compress(b"")
self.assertEqual(result, b"\x28\xb5\x2f\xfd\x00\x00\x01\x00\x00")
params = zstd.get_frame_parameters(result)
self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
self.assertEqual(params.window_size, 1024)
self.assertEqual(params.dict_id, 0)
        self.assertFalse(params.has_checksum)
cctx = zstd.ZstdCompressor()
result = cctx.compress(b"")
self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x00\x01\x00\x00")
params = zstd.get_frame_parameters(result)
self.assertEqual(params.content_size, 0)
def test_input_types(self):
cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
expected = b"\x28\xb5\x2f\xfd\x00\x00\x19\x00\x00\x66\x6f\x6f"
mutable_array = bytearray(3)
mutable_array[:] = b"foo"
sources = [
memoryview(b"foo"),
bytearray(b"foo"),
mutable_array,
]
for source in sources:
self.assertEqual(cctx.compress(source), expected)
def test_compress_large(self):
chunks = []
for i in range(255):
chunks.append(struct.Struct(">B").pack(i) * 16384)
cctx = zstd.ZstdCompressor(level=3, write_content_size=False)
result = cctx.compress(b"".join(chunks))
self.assertEqual(len(result), 999)
self.assertEqual(result[0:4], b"\x28\xb5\x2f\xfd")
# This matches the test for read_to_iter() below.
cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
result = cctx.compress(
b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o"
)
self.assertEqual(
result,
b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00"
b"\x10\x66\x66\x01\x00\xfb\xff\x39\xc0"
b"\x02\x09\x00\x00\x6f",
)
    def test_negative_level(self):
        cctx = zstd.ZstdCompressor(level=-4)
        result = cctx.compress(b"foo" * 256)
        # Negative levels are valid; just check that a non-empty frame comes back.
        self.assertTrue(result)
def test_no_magic(self):
params = zstd.ZstdCompressionParameters.from_level(
1, format=zstd.FORMAT_ZSTD1
)
cctx = zstd.ZstdCompressor(compression_params=params)
magic = cctx.compress(b"foobar")
params = zstd.ZstdCompressionParameters.from_level(
1, format=zstd.FORMAT_ZSTD1_MAGICLESS
)
cctx = zstd.ZstdCompressor(compression_params=params)
no_magic = cctx.compress(b"foobar")
self.assertEqual(magic[0:4], b"\x28\xb5\x2f\xfd")
self.assertEqual(magic[4:], no_magic)
def test_write_checksum(self):
cctx = zstd.ZstdCompressor(level=1)
no_checksum = cctx.compress(b"foobar")
cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
with_checksum = cctx.compress(b"foobar")
self.assertEqual(len(with_checksum), len(no_checksum) + 4)
no_params = zstd.get_frame_parameters(no_checksum)
with_params = zstd.get_frame_parameters(with_checksum)
self.assertFalse(no_params.has_checksum)
self.assertTrue(with_params.has_checksum)
def test_write_content_size(self):
cctx = zstd.ZstdCompressor(level=1)
with_size = cctx.compress(b"foobar" * 256)
cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
no_size = cctx.compress(b"foobar" * 256)
self.assertEqual(len(with_size), len(no_size) + 1)
no_params = zstd.get_frame_parameters(no_size)
with_params = zstd.get_frame_parameters(with_size)
self.assertEqual(no_params.content_size, zstd.CONTENTSIZE_UNKNOWN)
self.assertEqual(with_params.content_size, 1536)
def test_no_dict_id(self):
samples = []
for i in range(128):
samples.append(b"foo" * 64)
samples.append(b"bar" * 64)
samples.append(b"foobar" * 64)
d = zstd.train_dictionary(1024, samples)
cctx = zstd.ZstdCompressor(level=1, dict_data=d)
with_dict_id = cctx.compress(b"foobarfoobar")
cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
no_dict_id = cctx.compress(b"foobarfoobar")
self.assertEqual(len(with_dict_id), len(no_dict_id) + 4)
no_params = zstd.get_frame_parameters(no_dict_id)
with_params = zstd.get_frame_parameters(with_dict_id)
self.assertEqual(no_params.dict_id, 0)
self.assertEqual(with_params.dict_id, 1123828263)
def test_compress_dict_multiple(self):
samples = []
for i in range(128):
samples.append(b"foo" * 64)
samples.append(b"bar" * 64)
samples.append(b"foobar" * 64)
d = zstd.train_dictionary(8192, samples)
cctx = zstd.ZstdCompressor(level=1, dict_data=d)
for i in range(32):
cctx.compress(b"foo bar foobar foo bar foobar")
def test_dict_precompute(self):
samples = []
for i in range(128):
samples.append(b"foo" * 64)
samples.append(b"bar" * 64)
samples.append(b"foobar" * 64)
d = zstd.train_dictionary(8192, samples)
d.precompute_compress(level=1)
cctx = zstd.ZstdCompressor(level=1, dict_data=d)
for i in range(32):
cctx.compress(b"foo bar foobar foo bar foobar")
def test_multithreaded(self):
chunk_size = multithreaded_chunk_size(1)
source = b"".join([b"x" * chunk_size, b"y" * chunk_size])
cctx = zstd.ZstdCompressor(level=1, threads=2)
compressed = cctx.compress(source)
params = zstd.get_frame_parameters(compressed)
self.assertEqual(params.content_size, chunk_size * 2)
self.assertEqual(params.dict_id, 0)
self.assertFalse(params.has_checksum)
dctx = zstd.ZstdDecompressor()
self.assertEqual(dctx.decompress(compressed), source)
def test_multithreaded_dict(self):
samples = []
for i in range(128):
samples.append(b"foo" * 64)
samples.append(b"bar" * 64)
samples.append(b"foobar" * 64)
d = zstd.train_dictionary(1024, samples)
cctx = zstd.ZstdCompressor(dict_data=d, threads=2)
result = cctx.compress(b"foo")
params = zstd.get_frame_parameters(result)
self.assertEqual(params.content_size, 3)
self.assertEqual(params.dict_id, d.dict_id())
self.assertEqual(
result,
b"\x28\xb5\x2f\xfd\x23\x27\x42\xfc\x42\x03\x19\x00\x00"
b"\x66\x6f\x6f",
)
def test_multithreaded_compression_params(self):
params = zstd.ZstdCompressionParameters.from_level(0, threads=2)
cctx = zstd.ZstdCompressor(compression_params=params)
result = cctx.compress(b"foo")
params = zstd.get_frame_parameters(result)
self.assertEqual(params.content_size, 3)
self.assertEqual(
result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f"
)
def test_explicit_default_params(self):
cctx = zstd.ZstdCompressor(
level=3,
dict_data=None,
compression_params=None,
write_checksum=None,
write_content_size=None,
write_dict_id=None,
threads=0,
)
result = cctx.compress(b"")
self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x00\x01\x00\x00")
def test_compression_params_with_other_params(self):
params = zstd.ZstdCompressionParameters.from_level(3)
cctx = zstd.ZstdCompressor(
level=3,
dict_data=None,
compression_params=params,
write_checksum=None,
write_content_size=None,
write_dict_id=None,
threads=0,
)
result = cctx.compress(b"")
self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x00\x01\x00\x00")
with self.assertRaises(ValueError):
cctx = zstd.ZstdCompressor(
level=3,
dict_data=None,
compression_params=params,
write_checksum=False,
write_content_size=None,
write_dict_id=None,
threads=0,
)
with self.assertRaises(ValueError):
cctx = zstd.ZstdCompressor(
level=3,
dict_data=None,
compression_params=params,
write_checksum=None,
write_content_size=True,
write_dict_id=None,
threads=0,
)
with self.assertRaises(ValueError):
cctx = zstd.ZstdCompressor(
level=3,
dict_data=None,
compression_params=params,
write_checksum=None,
write_content_size=None,
write_dict_id=True,
threads=0,
)
with self.assertRaises(ValueError):
cctx = zstd.ZstdCompressor(
level=3,
dict_data=None,
compression_params=params,
write_checksum=None,
write_content_size=None,
write_dict_id=True,
threads=2,
)
|
|
from __future__ import unicode_literals
import uuid
import jinja2
import os
import copy
from IPython.display import HTML
from cytoolz.dicttoolz import valmap, merge
from IPython.display import display_html
from seaborn import color_palette
from matplotlib.colors import rgb2hex
from .js_utils import _dump_json, d3jsObject, JSCode
DATAGRAMAS_TEMPLATE_FILES = {'base.js', 'base.attributes.js', 'base.colorables.js',
'base.html', 'multiples.html', 'select-categories.html',
'scaffold.js'}
def _load_template(filename):
if filename in DATAGRAMAS_TEMPLATE_FILES:
filename = '{0}/templates/{1}'.format(SRC_DIR, filename)
with open(filename, 'r') as f:
code = f.read()
    # the third tuple element is jinja2's "uptodate" callable; always returning
    # False forces the template to be reloaded instead of cached
return (code, filename, lambda *a, **k: False)
SRC_DIR = os.path.dirname(os.path.realpath(__file__))
env = jinja2.environment.Environment()
env.loader = jinja2.FunctionLoader(_load_template)
class sketch(object):
"""
A sketch represents the state of a visualization before being rendered or scaffolded. It is built from a
configuration dictionary provided by each visualization. See build_sketch.
"""
datagram_events = ['datagram_start', 'datagram_end']
def __init__(self, **kwargs):
        if 'container_type' not in kwargs:
            raise Exception('need to define a container element')
        if 'data' not in kwargs or kwargs['data'] is None:
            raise Exception('you need to define at least one data variable')
self.configuration = kwargs.copy()
        if 'visualization_css' not in kwargs:
            self.configuration['visualization_css'] = '{0}/templates/{1}.css'.format(SRC_DIR, self.configuration['visualization_name'])
        if 'visualization_js' not in kwargs:
            self.configuration['visualization_js'] = '{0}/templates/{1}.js'.format(SRC_DIR, self.configuration['visualization_name'])
self.configuration['visualization_name'] = self.configuration['visualization_name'].replace('-', '_').replace('.', '_')
self.configuration['variables'] = valmap(self.process_variable, self.configuration['variables'])
if 'objects' in self.configuration:
self.configuration['objects'] = valmap(self.process_objects, self.configuration['objects'])
if 'attributes' in self.configuration:
self.configuration['attributes'] = valmap(self.process_attribute, self.configuration['attributes'])
else:
self.configuration['attributes'] = {}
if 'colorables' in self.configuration:
self.configuration['colorables'] = valmap(self.process_colorable, self.configuration['colorables'])
else:
self.configuration['colorables'] = {}
if 'allowed_events' in self.configuration['options'] and self.configuration['options']['allowed_events']:
if 'events' in self.configuration:
self.configuration['events'] = {k: self.process_event(k, v) for k, v in self.configuration['events'].items() if v is not None}
self.configuration['__data_variables__'] = list(self.configuration['data'].keys())
self.configuration['data'] = _dump_json(self.configuration['data'])
if 'facets' in self.configuration:
self.configuration['facets'] = _dump_json(self.configuration['facets'])
self.configuration['datagram_events'] = self.datagram_events
def process_variable(self, variable):
if type(variable) != JSCode:
return _dump_json(variable)
return variable.render(context=self.configuration)
def process_event(self, key, variable):
if type(variable) != JSCode:
raise Exception('Events can only be of JSCode type.')
if key not in self.configuration['options']['allowed_events'] and key not in self.datagram_events:
raise Exception('Unsupported event: {0}.'.format(key))
rendered = variable.render(context=self.configuration)
return rendered
def process_objects(self, variable):
if type(variable) != d3jsObject:
raise Exception('Non-object passed as object argument.')
rendered = variable.render(context=self.configuration)
return rendered
def process_attribute(self, attribute):
if 'legend' not in attribute:
attribute['legend'] = False
if 'legend_location' not in attribute:
attribute['legend_location'] = None
if 'legend_orientation' not in attribute:
attribute['legend_orientation'] = None
return valmap(self.process_variable, attribute)
def process_colorable(self, colorable):
if 'domain' not in colorable:
colorable['domain'] = None
if 'n_colors' not in colorable or colorable['n_colors'] is None:
if colorable['domain'] is None:
# this is the seaborn default
colorable['n_colors'] = 6
else:
colorable['n_colors'] = len(colorable['domain'])
else:
if type(colorable['n_colors']) != int or colorable['n_colors'] < 1:
                raise Exception('Number of colors must be an integer greater than or equal to 1.')
if 'palette' in colorable and colorable['palette'] is not None:
if type(colorable['palette']) == str:
# a palette name
palette = color_palette(colorable['palette'], n_colors=colorable['n_colors'])
colorable['palette'] = list(map(rgb2hex, palette))
else:
# a list of colors. we override n_colors
colorable['palette'] = list(map(rgb2hex, colorable['palette']))
colorable['n_colors'] = len(colorable['palette'])
else:
colorable['palette'] = None
if 'legend' not in colorable:
colorable['legend'] = False
if 'legend_location' not in colorable:
colorable['legend_location'] = None
if 'legend_orientation' not in colorable:
colorable['legend_orientation'] = None
return valmap(self.process_variable, colorable)
def _render_(self, template_name='base.html', **extra_args):
repr_args = merge(self.configuration.copy(), extra_args)
if self.configuration['visualization_js']:
repr_args['visualization_js'] = env.get_template(self.configuration['visualization_js']).render(**repr_args)
else:
raise Exception('Empty Visualization code!')
if 'functions_js' in self.configuration and self.configuration['functions_js'] is not None:
repr_args['functions_js'] = env.get_template(self.configuration['functions_js']).render(**repr_args)
template = env.get_template(template_name)
        if 'figure_id' not in repr_args or not repr_args['figure_id']:
            repr_args['figure_id'] = 'fig-{0}'.format(uuid.uuid4())
        if 'vis_uuid' not in repr_args or not repr_args['vis_uuid']:
            repr_args['vis_uuid'] = 'datagram-vis-{0}'.format(uuid.uuid4())
        if 'define_js_module' not in repr_args:
            repr_args['define_js_module'] = True
if self.configuration['visualization_css']:
try:
repr_args['visualization_css'] = env.get_template(self.configuration['visualization_css']).render(**repr_args)
except IOError:
repr_args['visualization_css'] = None
# some dependencies have names with invalid characters for variable names in Javascript
repr_args['requirements_as_args'] = list(map(lambda x: x.replace('-', '_'), repr_args['requirements']))
# if there are defined events, we merge them here
repr_args['event_names'] = []
if 'allowed_events' in repr_args['options'] and repr_args['options']['allowed_events']:
repr_args['event_names'].extend(repr_args['options']['allowed_events'])
repr_args['event_names'].extend(self.datagram_events)
repr_args['event_names'] = list(set(repr_args['event_names']))
return template.render(**repr_args)
def _ipython_display_(self):
"""
Automatically displays the sketch when returned on a notebook cell.
"""
self.show()
def show(self, multiples=None):
"""
Displays the sketch on the notebook.
"""
if multiples == 'small-multiples':
template_name = 'multiples.html'
elif multiples == 'select-categories':
template_name = 'select-categories.html'
else:
template_name = 'base.html'
rendered = self._render_(template_name)
display_html(HTML(rendered))
return None
def scaffold(self, filename=None, define_js_module=True, style=None, append=False, author_comment=None):
rendered = self._render_('scaffold.js', define_js_module=define_js_module, author_comment=author_comment)
if filename is None:
return rendered
with open(filename, 'w') as f:
f.write(rendered)
if style is not None:
mode = 'a' if append is True else 'w'
with open(style, mode) as f:
f.write(env.get_template(self.configuration['visualization_css']).render(**self.configuration))
sketch_doc_string_template = jinja2.Template('''{{ summary }}
Data Arguments:
{% for key, value in data.items() %}{{ key }} -- (default: {{ value }})
{% endfor %}
{% if variables %}Keyword Arguments:
{% for key, value in variables.items() %}{{ key }} -- (default: {{ value }})
{% endfor %}{% endif %}
{% if options %}Sketch Arguments:
{% for key, value in options.items() %}{{ key }} -- (default: {{ value }})
{% endfor %}{% endif %}
{% if attributes %}Mappeable Attributes:
{% for key, value in attributes.items() %}{{ key }} -- (default: {{ value }})
{% endfor %}{% endif %}
{% if colorables %}Colorable Attributes:
{% for key, value in colorables.items() %}{{ key }} -- (default: {{ value }})
{% endfor %}{% endif %}
''')
def build_sketch(default_args, opt_process=None):
"""
Receives a visualization config and returns a sketch function that can be used to display or scaffold
visualizations.
    The sketch function is not a visualization per se. Instead, it gets called with parameters that can replace
or update the default configuration provided by default_args, and the updated configuration is used to
display/scaffold a visualization.
"""
def sketch_fn(**kwargs):
"""
This is the function executed each time a visualization is displayed or scaffolded.
The default arguments used when defining the visualization will be updated with those from the
keyword arguments. However, not all elements of default_args can be overwritten.
This also includes logic to decide when to update an entire setting or just a sub-element of it.
:param kwargs: arguments for the visualization.
:return: sketch
"""
sketch_args = copy.deepcopy(default_args)
for key, value in kwargs.items():
if key in sketch_args:
sketch_args[key] = value
elif key == 'events' and 'options' in sketch_args and 'allowed_events' in sketch_args['options']:
sketch_args[key] = value
elif key in sketch_args['data']:
sketch_args['data'][key] = value
elif key in sketch_args['options']:
if type(sketch_args['options'][key]) == dict:
sketch_args['options'][key].update(value)
else:
sketch_args['options'][key] = value
elif key in sketch_args['variables']:
if type(sketch_args['variables'][key]) == dict:
sketch_args['variables'][key].update(value)
else:
sketch_args['variables'][key] = value
elif 'attributes' in sketch_args and key in sketch_args['attributes']:
if type(value) == dict:
sketch_args['attributes'][key].update(value)
elif type(value) in (int, float):
sketch_args['attributes'][key]['value'] = value
elif value is None or type(value) == str:
sketch_args['attributes'][key]['value'] = value
else:
                    raise Exception('Unsupported value for attribute {0}: {1}'.format(key, value))
elif 'colorables' in sketch_args and key in sketch_args['colorables']:
if type(value) == dict:
sketch_args['colorables'][key].update(value)
elif type(value) in (int, float):
sketch_args['colorables'][key]['value'] = value
elif value is None or type(value) == str:
sketch_args['colorables'][key]['value'] = value
else:
                    raise Exception('Unsupported value for colorable {0}: {1}'.format(key, value))
elif key in ('figure_id', 'facets'):
sketch_args[key] = value
else:
raise Exception('Invalid argument: {0}'.format(key))
if callable(opt_process):
opt_process(sketch_args)
return sketch(**sketch_args)
    if 'summary' not in default_args:
default_args['summary'] = default_args['visualization_name']
sketch_fn.__doc__ = sketch_doc_string_template.render(default_args)
sketch_fn.variable_names = list(default_args['data'].keys())
return sketch_fn
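# A minimal usage sketch (hypothetical; the configuration keys below are
# illustrative assumptions, not a real datagramas visualization): build_sketch
# turns a default configuration into a callable that accepts overrides and
# returns a sketch ready to be shown or scaffolded.
#
#   scatterplot_args = {
#       'visualization_name': 'scatterplot',
#       'container_type': 'div',
#       'requirements': ['d3'],
#       'data': {'dataframe': None},
#       'variables': {'width': 800, 'height': 400},
#       'options': {'allowed_events': ['click']},
#   }
#   scatterplot = build_sketch(scatterplot_args)
#   scatterplot(dataframe=my_dataframe, width=600).show()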
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import datetime
import logging
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.dateformat import format as date_format
from snisi_core.models.Periods import (MonthPeriod,
ONE_MICROSECOND_DELTA, ONE_MINUTE_DELTA)
from snisi_core.models.FixedWeekPeriods import (FixedMonthFirstWeek,
FixedMonthSecondWeek,
FixedMonthThirdWeek,
FixedMonthFourthWeek,
FixedMonthFifthWeek)
from snisi_tools.datetime import normalize_date
logger = logging.getLogger(__name__)
class MonthlyReportingManager(models.Manager):
def get_queryset(self):
return super(MonthlyReportingManager, self).get_queryset().filter(
period_type=DefaultMonthlyReportingPeriod.DMRP)
class DefaultMonthlyReportingPeriod(MonthPeriod):
DMRP = 'monthly_reporting'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Reporting Period")
verbose_name_plural = _("Monthly Reporting Periods")
objects = MonthlyReportingManager()
@classmethod
def type(cls):
return cls.DMRP
@property
def pid(self):
return self.middle().strftime('MRP%m%Y')
def name(self):
# Translators: Django's date template format for MonthPeriod.name()
return date_format(self.middle(), ugettext("1-5 F Y"))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return date_format(self.middle(), ugettext("1st to 5th F Y"))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
date_obj = normalize_date(date_obj, as_aware=True)
start = date_obj.replace(day=1, hour=0, minute=0,
second=0, microsecond=0)
end = start.replace(day=6) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return self.middle().strftime('[1-5]-%m-%Y')
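# Worked illustration of the boundaries arithmetic (assumes normalize_date
# returns an aware datetime and keeps its timezone): for any date in March
# 2014, DefaultMonthlyReportingPeriod.boundaries() spans
# 2014-03-01 00:00:00 through 2014-03-05 23:59:59.999999, i.e. the first five
# days of the month.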
class MonthlyExtendedReportingManager(models.Manager):
def get_queryset(self):
return super(MonthlyExtendedReportingManager, self) \
.get_queryset().filter(
period_type=DefaultMonthlyExtendedReportingPeriod.DMERP)
class DefaultMonthlyExtendedReportingPeriod(MonthPeriod):
DMERP = 'monthly_extended_reporting'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Extended Reporting Period")
verbose_name_plural = _("Monthly Extended Reporting Periods")
objects = MonthlyExtendedReportingManager()
@classmethod
def type(cls):
return cls.DMERP
@property
def pid(self):
return self.middle().strftime('MERP%m%Y')
def name(self):
# Translators: Django's date template format for MonthPeriod.name()
return date_format(self.middle(), ugettext("6-10 F Y"))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return date_format(self.middle(), ugettext("6th to 10th F Y"))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
date_obj = normalize_date(date_obj, as_aware=True)
start = date_obj.replace(day=6, hour=0, minute=0,
second=0, microsecond=0)
end = start.replace(day=11) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return self.middle().strftime('[6-10]-%m-%Y')
class FixedMonthFirstWeekReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthFirstWeekReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthFirstWeekReportingPeriod.FWP)
class FixedMonthFirstWeekReportingPeriod(MonthPeriod):
FWP = 'fixed_month_first_week_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Reporting Period")
verbose_name_plural = _("Monthly Reporting Periods")
objects = FixedMonthFirstWeekReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM1WRP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W1/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W1/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthFirstWeek.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=2) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W1/RP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthSecondWeekReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthSecondWeekReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthSecondWeekReportingPeriod.FWP)
class FixedMonthSecondWeekReportingPeriod(MonthPeriod):
FWP = 'fixed_month_second_week_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Reporting Period")
verbose_name_plural = _("Monthly Reporting Periods")
objects = FixedMonthSecondWeekReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM2WRP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W2/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W2/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthSecondWeek.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=2) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W2/RP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthThirdWeekReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthThirdWeekReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthThirdWeekReportingPeriod.FWP)
class FixedMonthThirdWeekReportingPeriod(MonthPeriod):
FWP = 'fixed_month_third_week_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Reporting Period")
verbose_name_plural = _("Monthly Reporting Periods")
objects = FixedMonthThirdWeekReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM3WRP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W3/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W3/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthThirdWeek.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=2) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W3/RP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthFourthWeekReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthFourthWeekReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthFourthWeekReportingPeriod.FWP)
class FixedMonthFourthWeekReportingPeriod(MonthPeriod):
FWP = 'fixed_month_fourth_week_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Reporting Period")
verbose_name_plural = _("Monthly Reporting Periods")
objects = FixedMonthFourthWeekReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM4WRP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W4/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W4/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthFourthWeek.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=2) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W4/RP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthFifthWeekReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthFifthWeekReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthFifthWeekReportingPeriod.FWP)
class FixedMonthFifthWeekReportingPeriod(MonthPeriod):
FWP = 'fixed_month_fifth_week_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Reporting Period")
verbose_name_plural = _("Monthly Reporting Periods")
objects = FixedMonthFifthWeekReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM5WRP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W5/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W5/RP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthFifthWeek.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=2) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W5/RP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthFirstWeekExtendedReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthFirstWeekExtendedReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthFirstWeekExtendedReportingPeriod.FWP)
class FixedMonthFirstWeekExtendedReportingPeriod(MonthPeriod):
FWP = 'fixed_month_first_week_extended_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Extended Reporting Period")
        verbose_name_plural = _("Monthly Extended Reporting Periods")
objects = FixedMonthFirstWeekExtendedReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM1WERP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W1/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W1/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthFirstWeekReportingPeriod.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=3) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W1/ERP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthSecondWeekExtendedReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthSecondWeekExtendedReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthSecondWeekExtendedReportingPeriod.FWP)
class FixedMonthSecondWeekExtendedReportingPeriod(MonthPeriod):
FWP = 'fixed_month_second_week_extended_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Extended Reporting Period")
        verbose_name_plural = _("Monthly Extended Reporting Periods")
objects = FixedMonthSecondWeekExtendedReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM2WERP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W2/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W2/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthSecondWeekReportingPeriod.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=3) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W2/ERP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthThirdWeekExtendedReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthThirdWeekExtendedReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthThirdWeekExtendedReportingPeriod.FWP)
class FixedMonthThirdWeekExtendedReportingPeriod(MonthPeriod):
FWP = 'fixed_month_third_week_extended_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Extended Reporting Period")
        verbose_name_plural = _("Monthly Extended Reporting Periods")
objects = FixedMonthThirdWeekExtendedReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM3WERP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W3/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W3/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthThirdWeekReportingPeriod.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=3) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W3/ERP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthFourthWeekExtendedReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthFourthWeekExtendedReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthFourthWeekExtendedReportingPeriod.FWP)
class FixedMonthFourthWeekExtendedReportingPeriod(MonthPeriod):
FWP = 'fixed_month_fourth_week_extended_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Extended Reporting Period")
        verbose_name_plural = _("Monthly Extended Reporting Periods")
objects = FixedMonthFourthWeekExtendedReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM4WERP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W4/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W4/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthFourthWeekReportingPeriod.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=3) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W4/ERP]-{}".format(self.middle().strftime('%m-%Y'))
class FixedMonthFifthWeekExtendedReportingManager(models.Manager):
def get_queryset(self):
return super(FixedMonthFifthWeekExtendedReportingManager, self) \
.get_queryset().filter(
period_type=FixedMonthFifthWeekExtendedReportingPeriod.FWP)
class FixedMonthFifthWeekExtendedReportingPeriod(MonthPeriod):
FWP = 'fixed_month_fifth_week_extended_reporting_period'
class Meta:
proxy = True
app_label = 'snisi_core'
verbose_name = _("Monthly Extended Reporting Period")
        verbose_name_plural = _("Monthly Extended Reporting Periods")
objects = FixedMonthFifthWeekExtendedReportingManager()
@classmethod
def type(cls):
return cls.FWP
@property
def pid(self):
return self.middle().strftime('FM5WERP%m%Y')
def name(self):
# Translators: Django's date tmpl format for MonthPeriod.name()
return ugettext("W5/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
def full_name(self):
# Translators: Django's date tmpl format for MonthPeriod.full_name()
return ugettext("W5/ERP {}").format(date_format(
self.middle(), ugettext("F Y")))
@classmethod
    def delta(cls):
return 31
@classmethod
def boundaries(cls, date_obj):
fw = FixedMonthFifthWeekReportingPeriod.find_create_by_date(
date_obj, dont_create=True)
start = fw.end_on + ONE_MINUTE_DELTA
end = start + datetime.timedelta(days=3) - ONE_MICROSECOND_DELTA
return (start, end)
def strid(self):
return "[W5/ERP]-{}".format(self.middle().strftime('%m-%Y'))
|
|
"""Assertion based test cases for monotable.table.MonoBlock."""
import pytest
import monotable.alignment
import monotable.table
NOT_SPECIFIED = monotable.table.NOT_SPECIFIED
LEFT = monotable.alignment.LEFT
CENTER = monotable.alignment.CENTER
RIGHT = monotable.alignment.RIGHT
TOP = monotable.alignment.TOP
CENTER_TOP = monotable.alignment.CENTER_TOP
CENTER_BOTTOM = monotable.alignment.CENTER_BOTTOM
BOTTOM = monotable.alignment.BOTTOM
#
# Tests for MonoBlock.__init__().
#
def check_empty_instance(mb):
"""Helper- Make sure empty MonoBlock instance is properly initialized."""
assert mb.height == 1
assert mb.width == 0
assert mb.lines == ['']
def test_init_no_args():
mb = monotable.table.MonoBlock()
assert mb._halign == LEFT
check_empty_instance(mb)
def test_init_empty_string():
text = ''
mb = monotable.table.MonoBlock(text)
assert mb._halign == LEFT
check_empty_instance(mb)
def test_init_solo_newline():
mb = monotable.table.MonoBlock('\n')
assert mb._halign == LEFT
check_empty_instance(mb)
def test_init_halign_left():
mb = monotable.table.MonoBlock(halign=LEFT)
assert mb._halign == LEFT
check_empty_instance(mb)
def test_init_halign_center():
mb = monotable.table.MonoBlock(halign=CENTER)
assert mb._halign == CENTER
check_empty_instance(mb)
def test_init_halign_right():
mb = monotable.table.MonoBlock(halign=RIGHT)
assert mb._halign == RIGHT
check_empty_instance(mb)
def test_init_illegal_halign():
# Note- Does not test for newline between the lines.
bad_msg_start = 'Expected a horizontal align value, got:'
bad_msg_end = 'Allowed values are: _NOT_SPECIFIED, _LEFT, _CENTER, _RIGHT'
with pytest.raises(AssertionError) as excinfo:
_ = monotable.table.MonoBlock(halign=5)
assert str(excinfo.value).startswith(bad_msg_start)
assert '5' in str(excinfo.value)
assert bad_msg_end in str(excinfo.value)
with pytest.raises(AssertionError) as excinfo:
_ = monotable.table.MonoBlock(halign=-1)
assert str(excinfo.value).startswith(bad_msg_start)
assert '-1' in str(excinfo.value)
assert bad_msg_end in str(excinfo.value)
def test_init_blank_char():
text = ' '
mb = monotable.table.MonoBlock(text)
assert mb._halign == LEFT
assert mb.height == 1
assert mb.width == len(text)
assert mb.lines == [text]
def test_init_one_line():
text = ' just one line . ' # leading and trailing spaces
mb = monotable.table.MonoBlock(text)
assert mb._halign == LEFT
assert mb.height == 1
assert mb.width == len(text)
assert mb.lines == [text]
def test_init_double_newline():
mb = monotable.table.MonoBlock('\n\n')
assert mb._halign == LEFT
assert mb.height == 2
assert mb.width == 0
assert mb.lines == ['', '']
def test_init_triple_newline():
mb = monotable.table.MonoBlock('\n\n\n', CENTER)
assert mb._halign == CENTER
assert mb.height == 3
assert mb.width == 0
assert mb.lines == ['', '', '']
def test_init_multi_line():
mb = monotable.table.MonoBlock('a\nbc\ndef\ng\nhij', LEFT)
assert mb._halign == LEFT
assert mb.height == 5
assert mb.width == 3
assert mb.lines == ['a', 'bc', 'def', 'g', 'hij']
def test_init_multi_newline():
mb = monotable.table.MonoBlock('\n\na\nbc\ndef\ng\nhij\n\n', RIGHT)
assert mb._halign == RIGHT
assert mb.height == 8
assert mb.width == 3
assert mb.lines == ['', '', 'a', 'bc', 'def', 'g', 'hij', '']
#
# Tests for MonoBlock.__str__().
#
def test_str():
lines = ['ab hijk', '', 'm', ' n', '', 'p ']
text = '\n'.join(lines)
mb = monotable.table.MonoBlock(text)
assert str(mb) == text
def test_str_empty():
mb = monotable.table.MonoBlock()
assert str(mb) == ''
def test_str_empty_last_line():
mb = monotable.table.MonoBlock('\n\n')
assert str(mb) == '\n'
def test_trailing_whitespace():
mb = monotable.table.MonoBlock('abc ')
assert str(mb) == 'abc '
mb = monotable.table.MonoBlock('abc \n \t')
assert str(mb) == 'abc \n \t'
#
# Tests for MonoBlock.is_all_spaces().
#
def test_is_all_spaces():
mb = monotable.table.MonoBlock('')
assert mb.is_all_spaces()
mb = monotable.table.MonoBlock('\n')
assert mb.is_all_spaces()
mb = monotable.table.MonoBlock(' ')
assert mb.is_all_spaces()
mb = monotable.table.MonoBlock(' \n\n ')
assert mb.is_all_spaces()
mb = monotable.table.MonoBlock('a')
assert not mb.is_all_spaces()
mb = monotable.table.MonoBlock(' \na\n')
assert not mb.is_all_spaces()
mb = monotable.table.MonoBlock('a\n ')
assert not mb.is_all_spaces()
#
# Lines for MonoBlock instances for horizontal justification tests
# and list of them.
#
HORIZONTAL_LINE0 = 'hijk'
HORIZONTAL_LINE1 = ' t u'
HORIZONTAL_LINE2 = 'r '
HORIZONTAL_LINE3 = ' s'
HORIZONTAL_LINES = [HORIZONTAL_LINE0, HORIZONTAL_LINE1,
HORIZONTAL_LINE2, HORIZONTAL_LINE3]
HORIZONTAL_TEXT = '\n'.join(HORIZONTAL_LINES)
def make_instance_for_horizontal_tests(justification):
"""Create a MonoBlock instance with text from JUSTIFY_LINES.
justification
Left, center, right, or not specified alignment for justification.
"""
return monotable.table.MonoBlock(HORIZONTAL_TEXT, justification)
#
# Tests for MonoBlock.chop_to_fieldsize().
#
def test_illegal_fieldsize():
mb = make_instance_for_horizontal_tests(NOT_SPECIFIED)
msg = 'nonsense to truncate to fieldsize=0 or less'
with pytest.raises(AssertionError) as excinfo:
mb.chop_to_fieldsize(-1)
assert str(excinfo.value).startswith(msg)
with pytest.raises(AssertionError) as excinfo:
mb.chop_to_fieldsize(0)
assert str(excinfo.value).startswith(msg)
def test_fieldsize_is_1_no_marker():
mb = make_instance_for_horizontal_tests(NOT_SPECIFIED)
mb.chop_to_fieldsize(1)
assert mb.lines == ['h', ' ', 'r', ' ']
def test_fieldsize_is_1():
mb = make_instance_for_horizontal_tests(NOT_SPECIFIED)
mb.chop_to_fieldsize(1, more_marker='#')
assert mb.lines == ['#', '#', '#', '#']
def test_fieldsize_is_1_wider_marker():
mb = make_instance_for_horizontal_tests(NOT_SPECIFIED)
mb.chop_to_fieldsize(1, more_marker='##')
assert mb.lines == ['#', '#', '#', '#']
def test_fieldsize_is_3():
mb = make_instance_for_horizontal_tests(NOT_SPECIFIED)
mb.chop_to_fieldsize(3, more_marker='#')
assert mb.lines == ['hi#', ' t#', 'r ', ' s']
def test_fieldsize_is_width():
mb = make_instance_for_horizontal_tests(NOT_SPECIFIED)
mb.chop_to_fieldsize(mb.width, more_marker='#')
assert mb.lines == HORIZONTAL_LINES
def test_fieldsize_is_over_width():
mb = make_instance_for_horizontal_tests(NOT_SPECIFIED)
mb.chop_to_fieldsize(
mb.width + 2, more_marker='#')
assert mb.lines == HORIZONTAL_LINES
#
# Tests for MonoBlock.hjustify() of instance set to left justification.
#
def test_left_justify():
mb = make_instance_for_horizontal_tests(LEFT)
mb.hjustify()
assert mb.lines == [HORIZONTAL_LINE0.ljust(4),
HORIZONTAL_LINE1.ljust(4),
HORIZONTAL_LINE2.ljust(4),
HORIZONTAL_LINE3.ljust(4)]
def test_left_justify_into_larger():
mb = make_instance_for_horizontal_tests(LEFT)
mb.hjustify(5)
assert mb.lines == [HORIZONTAL_LINE0.ljust(5),
HORIZONTAL_LINE1.ljust(5),
HORIZONTAL_LINE2.ljust(5),
HORIZONTAL_LINE3.ljust(5)]
def test_left_justify_into_smaller_ignored():
mb = make_instance_for_horizontal_tests(LEFT)
mb.hjustify(2)
assert mb.lines == [HORIZONTAL_LINE0.ljust(4),
HORIZONTAL_LINE1.ljust(4),
HORIZONTAL_LINE2.ljust(4),
HORIZONTAL_LINE3.ljust(4)]
#
# Tests for MonoBlock.hjustify() of instance set to center justification.
#
def test_center_justify():
mb = make_instance_for_horizontal_tests(CENTER)
mb.hjustify()
assert mb.lines == [HORIZONTAL_LINE0.center(4),
HORIZONTAL_LINE1.center(4),
HORIZONTAL_LINE2.center(4),
HORIZONTAL_LINE3.center(4)]
def test_center_justify_into_larger():
mb = make_instance_for_horizontal_tests(CENTER)
mb.hjustify(5)
assert mb.lines == [HORIZONTAL_LINE0.center(5),
HORIZONTAL_LINE1.center(5),
HORIZONTAL_LINE2.center(5),
HORIZONTAL_LINE3.center(5)]
def test_center_justify_into_smaller_ignored():
mb = make_instance_for_horizontal_tests(CENTER)
mb.hjustify(2)
assert mb.lines == [HORIZONTAL_LINE0.center(4),
HORIZONTAL_LINE1.center(4),
HORIZONTAL_LINE2.center(4),
HORIZONTAL_LINE3.center(4)]
#
# Tests for MonoBlock.hjustify() of instance set to right justification.
#
def test_right_justify():
mb = make_instance_for_horizontal_tests(RIGHT)
mb.hjustify()
assert mb.lines == [HORIZONTAL_LINE0.rjust(4),
HORIZONTAL_LINE1.rjust(4),
HORIZONTAL_LINE2.rjust(4),
HORIZONTAL_LINE3.rjust(4)]
def test_right_justify_into_larger():
mb = make_instance_for_horizontal_tests(RIGHT)
mb.hjustify(5)
assert mb.lines == [HORIZONTAL_LINE0.rjust(5),
HORIZONTAL_LINE1.rjust(5),
HORIZONTAL_LINE2.rjust(5),
HORIZONTAL_LINE3.rjust(5)]
def test_right_justify_into_smaller_ignored():
mb = make_instance_for_horizontal_tests(RIGHT)
mb.hjustify(2)
assert mb.lines == [HORIZONTAL_LINE0.rjust(4),
HORIZONTAL_LINE1.rjust(4),
HORIZONTAL_LINE2.rjust(4),
HORIZONTAL_LINE3.rjust(4)]
#
# Tests for MonoBlock.vjustify().
#
# The following tests that call MonoBlock.vjustify are run on
# MonoBlocks initialized arbitrarily for left, center, and right justification.
# The tests should work for any horizontal justification.
#
def test_illegal_height():
mb = make_instance_for_horizontal_tests(RIGHT)
msg = 'nonsense if less than 1 line in result'
with pytest.raises(AssertionError) as excinfo:
mb.vjustify(TOP, 0)
assert str(excinfo.value).startswith(msg)
with pytest.raises(AssertionError) as excinfo:
mb.vjustify(TOP, -1)
assert str(excinfo.value).startswith(msg)
def test_top_justify():
mb = make_instance_for_horizontal_tests(LEFT)
mb.vjustify(TOP, 6)
assert mb.height == 6
assert mb.width == 4
assert mb.lines == [HORIZONTAL_LINE0, HORIZONTAL_LINE1,
HORIZONTAL_LINE2, HORIZONTAL_LINE3,
' ', ' ']
def test_illegal_vertical_justify():
# Note- Only tests the first part of the exception string.
mb = make_instance_for_horizontal_tests(LEFT)
msg = 'Expected a vertical align value, got:'
with pytest.raises(AssertionError) as excinfo:
mb.vjustify(-1, 6)
assert str(excinfo.value).startswith(msg)
with pytest.raises(AssertionError) as excinfo:
mb.vjustify(0, 6)
assert str(excinfo.value).startswith(msg)
with pytest.raises(AssertionError) as excinfo:
mb.vjustify(19, 6)
assert str(excinfo.value).startswith(msg)
with pytest.raises(AssertionError) as excinfo:
mb.vjustify(25, 6)
assert str(excinfo.value).startswith(msg)
def test_center_top_justify():
mb = make_instance_for_horizontal_tests(RIGHT)
mb.vjustify(CENTER_TOP, 7)
assert mb.height == 7
assert mb.width == 4
assert mb.lines == [' ',
HORIZONTAL_LINE0, HORIZONTAL_LINE1,
HORIZONTAL_LINE2, HORIZONTAL_LINE3,
' ', ' ']
def test_center_bottom_justify():
mb = make_instance_for_horizontal_tests(RIGHT)
mb.vjustify(CENTER_BOTTOM, 7)
assert mb.height == 7
assert mb.width == 4
assert mb.lines == [' ', ' ',
HORIZONTAL_LINE0, HORIZONTAL_LINE1, HORIZONTAL_LINE2,
HORIZONTAL_LINE3,
' ']
def test_bottom_justify():
mb = make_instance_for_horizontal_tests(RIGHT)
mb.vjustify(BOTTOM, 5)
assert mb.height == 5
assert mb.width == 4
assert mb.lines == [' ',
HORIZONTAL_LINE0, HORIZONTAL_LINE1,
HORIZONTAL_LINE2, HORIZONTAL_LINE3]
#
# Tests for MonoBlock.vjustify().
#
#
# Lines for MonoBlock instances for vertical justification tests
# and list of them.
#
VERTICAL_LINE0 = 'hijk'
VERTICAL_LINE1 = 't'
VERTICAL_LINE2 = 'r '
VERTICAL_LINE3 = ' s'
VERTICAL_TEXT = '\n'.join([VERTICAL_LINE0, VERTICAL_LINE1,
VERTICAL_LINE2, VERTICAL_LINE3])
def make_instance_for_vertical_tests(justification):
"""Create a MonoBlock instance with text from VERTICAL_LINES.
justification
Left, center, right, or not specified alignment for justification.
"""
return monotable.table.MonoBlock(VERTICAL_TEXT, justification)
def test_vertical_truncate_no_marker():
mb = make_instance_for_vertical_tests(RIGHT)
mb.vjustify(TOP, 3)
assert mb.height == 3
assert mb.width == 4
assert mb.lines == [VERTICAL_LINE0, VERTICAL_LINE1, 'r ']
def test_vertical_truncate_1_char_marker():
mb = make_instance_for_vertical_tests(RIGHT)
mb.vjustify(TOP, 3, more_marker='#')
assert mb.height == 3
assert mb.width == 4
assert mb.lines == [VERTICAL_LINE0, VERTICAL_LINE1, 'r #']
def test_vertical_truncate_2_char_marker():
mb = make_instance_for_vertical_tests(RIGHT)
mb.vjustify(TOP, 3, more_marker='##')
assert mb.height == 3
assert mb.width == 4
assert mb.lines == [VERTICAL_LINE0, VERTICAL_LINE1, 'r ##']
def test_vertical_truncate_3_char_marker():
mb = make_instance_for_vertical_tests(RIGHT)
mb.vjustify(TOP, 3, more_marker='###')
assert mb.height == 3
assert mb.width == 4
assert mb.lines == [VERTICAL_LINE0, VERTICAL_LINE1, 'r###']
def test_vertical_truncate_4_char_marker():
mb = make_instance_for_vertical_tests(RIGHT)
mb.vjustify(TOP, 3, more_marker='####')
assert mb.height == 3
assert mb.width == 4
assert mb.lines == [VERTICAL_LINE0, VERTICAL_LINE1, '####']
def test_vertical_truncate_wider_marker():
mb = make_instance_for_vertical_tests(RIGHT)
mb.vjustify(TOP, 3, more_marker='#####')
assert mb.height == 3
assert mb.width == 4
assert mb.lines == [VERTICAL_LINE0, VERTICAL_LINE1, '####']
def test_vertical_truncate_short_line():
mb = make_instance_for_vertical_tests(RIGHT)
mb.vjustify(TOP, 2, more_marker='###')
assert mb.height == 2
assert mb.width == 4
assert mb.lines == [VERTICAL_LINE0, 't###']
#
# Tests for MonoBlock.add_border().
#
def test_add_border_negative_hmargin_ignored():
mb = monotable.table.MonoBlock()
mb.add_border(hmargin=-1)
assert str(mb) == '++\n||\n++'
def test_add_border_negative_vmargin_ignored():
mb = monotable.table.MonoBlock()
mb.add_border(hmargin=0, vmargin=-1)
assert str(mb) == '++\n||\n++'
def test_add_border_empty_monoblock():
mb = monotable.table.MonoBlock()
mb.add_border(hmargin=0)
assert str(mb) == '++\n||\n++'
def test_add_border_1_char_monoblock():
mb = monotable.table.MonoBlock('b')
mb.add_border(hmargin=0, border_chars='1234')
assert str(mb) == '414\n3b3\n424'
def test_add_border_no_justify():
mb = monotable.table.MonoBlock('ABCDE\nF\nG')
mb.add_border()
assert str(mb) == '\n'.join(['+-------+',
'| ABCDE |',
'| F |',
'| G |',
'+-------+'])
def test_add_border_extra_border_chars():
mb = monotable.table.MonoBlock('ABCDE\nF\nG')
mb.add_border(border_chars='--|+=XXXXXX')
assert str(mb) == '\n'.join(['+-------+',
'| ABCDE |',
'| F |',
'| G |',
'+-------+'])
def test_add_border_fewer_border_chars():
mb = monotable.table.MonoBlock('ABCDE\nF\nG')
mb.add_border(border_chars='_')
assert str(mb) == '\n'.join(['+_______+',
'+ ABCDE +',
'+ F +',
'+ G +',
'+++++++++'])
def test_add_border_justified():
mb = monotable.table.MonoBlock('ABCDE\nF\nG', RIGHT)
mb.add_border()
assert str(mb) == '\n'.join(['+-------+',
'| ABCDE |',
'| F |',
'| G |',
'+-------+'])
def test_add_border_bigger_margins():
mb = monotable.table.MonoBlock('ABCDE\nF\nG', CENTER)
mb.add_border(hmargin=2, vmargin=3)
assert str(mb) == '\n'.join(['+---------+',
'| |',
'| |',
'| |',
'| ABCDE |',
'| F |',
'| G |',
'| |',
'| |',
'| |',
'+---------+'])
#
# Tests for MonoBlock.remove_top_line().
#
def test_remove_top_line_empty_monoblock():
mb = monotable.table.MonoBlock()
mb.remove_top_line()
assert mb.height == 1
assert mb.width == 0
assert str(mb) == ''
def test_remove_top_line_one_line_monoblock():
mb = monotable.table.MonoBlock('abc')
mb.remove_top_line()
assert mb.height == 1
assert mb.width == 0
assert str(mb) == ''
def test_remove_top_line_multiline_monoblock():
mb = monotable.table.MonoBlock('ABCDE\nF\nG', RIGHT)
mb.remove_top_line()
assert str(mb) == 'F\nG'
#
# Tests for MonoBlock.remove_left_column().
#
def test_remove_left_column_zero_width_monoblock():
mb = monotable.table.MonoBlock()
mb.remove_left_column()
assert mb.height == 1
assert mb.width == 0
assert str(mb) == ''
def test_remove_left_column_one_line_monoblock():
mb = monotable.table.MonoBlock('abc')
mb.remove_left_column()
assert mb.height == 1
assert mb.width == 2
assert str(mb) == 'bc'
def test_remove_left_column_multiline_monoblock():
mb = monotable.table.MonoBlock('ABCDE\nF\nG', RIGHT)
mb.remove_left_column()
assert str(mb) == 'BCDE\n\n'
|
|
import logging
import datetime
import json
import operator
from urlparse import urlparse
from util import cached_property
from util import dict_from_dir
from totalimpactwebapp import db
from totalimpactwebapp import json_sqlalchemy
logger = logging.getLogger("ti.biblio")
def matches_biblio(product1, product2):
biblio1 = product1.clean_biblio_dedup_dict
biblio2 = product2.clean_biblio_dedup_dict
is_equivalent = False
if biblio1["title"]==biblio2["title"]:
if biblio1["genre"]==biblio2["genre"]:
if biblio1["is_preprint"]==biblio2["is_preprint"]:
is_equivalent = True
return is_equivalent
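# Hypothetical illustration: two products whose clean_biblio_dedup_dict values
# share the same title, genre, and is_preprint flag are treated as duplicates;
# a preprint and its published version (is_preprint differs) are not.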
class BiblioRow(db.Model):
__tablename__ = 'biblio'
tiid = db.Column(db.Text, db.ForeignKey('item.tiid'), primary_key=True)
provider = db.Column(db.Text, primary_key=True)
biblio_name = db.Column(db.Text, primary_key=True)
biblio_value = db.Column(json_sqlalchemy.JSONAlchemy(db.Text))
collected_date = db.Column(db.DateTime())
def __init__(self, **kwargs):
if "collected_date" not in kwargs:
self.collected_date = datetime.datetime.utcnow()
super(BiblioRow, self).__init__(**kwargs)
#if aliases.best_url is not None:
# self.url = aliases.best_url
@cached_property
def is_good_choice(self):
if self.biblio_name=="title":
try:
if self.biblio_value.isupper():
return False
except AttributeError: #some titles are ints, apparently
return False
return True
@cached_property
def sort_score(self):
score = {
"user_provided":0,
"crossref":1,
"pubmed":2,
"mendeley":3,
"webpage":99
}
return score.get(self.provider, 50)
def best_biblio_row(biblio_rows, field):
matching_biblio_rows = [row for row in biblio_rows if row.biblio_name==field]
if not matching_biblio_rows:
return None
matching_biblio_rows.sort(key=operator.attrgetter('sort_score'))
best_matching_row = next((row for row in matching_biblio_rows if row.is_good_choice), None)
# if no good choice, just pick the first one
if not best_matching_row:
best_matching_row = matching_biblio_rows[0]
return best_matching_row
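# Hypothetical illustration: given "title" rows collected from crossref
# (sort_score 1) and mendeley (sort_score 3), best_biblio_row prefers the
# crossref row; if the crossref title is all uppercase (is_good_choice is
# False) the mendeley row is chosen instead, and if no row is a good choice
# the lowest-scored row is returned anyway.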
class Biblio(object):
def __init__(self, biblio_rows):
# build out the properties of this object
biblio_name_fields = set([row.biblio_name for row in biblio_rows])
for field in biblio_name_fields:
row = best_biblio_row(biblio_rows, field)
if row:
setattr(self, row.biblio_name, row.biblio_value)
@cached_property
def display_year(self):
try:
return str(self.year)
except (AttributeError, UnicodeEncodeError):
return None
@cached_property
def calculated_genre(self):
if hasattr(self, "genre") and self.genre:
if self.genre not in ["undefined", "other"]:
return self.genre
if hasattr(self, "journal") and self.journal:
return "article"
return None
@cached_property
def calculated_host(self):
try:
return self.repository.split(" ")[0].lower()
except AttributeError:
return None
@cached_property
def display_authors(self):
try:
auths = ",".join(self.authors.split(",")[0:3])
if auths.isupper():
auths = auths.title()
if len(auths) < len(self.authors):
auths += " et al."
except AttributeError:
auths = None
return auths
@cached_property
def author_list(self):
try:
auth_list = self.authors.split(",")
except AttributeError:
auth_list = []
ret = []
for auth in auth_list:
my_auth = auth.strip()
try:
if my_auth.isupper():
my_auth = my_auth.title()
except AttributeError:
pass
ret.append(my_auth)
return ret
@cached_property
def display_title(self):
try:
ret = self.title
except AttributeError:
ret = "no title available"
try:
if ret.isupper():
ret = ret.title()
except AttributeError: #some titles are ints, apparently
pass
return ret
@cached_property
def display_host(self):
try:
return self.journal
except AttributeError:
try:
return self.repository
except AttributeError:
return ''
@cached_property
def free_fulltext_host(self):
try:
return self._get_url_host(self.free_fulltext_url)
except AttributeError:
return None
def _get_url_host(self, url):
# this should actually be done upstream, where we have a list of
# free-fulltext DOI fragments. this quick hack gets a few for now.
parsed = urlparse(url)
if parsed.path.startswith("/10.1371"):
host = "Public Library of Science"
elif parsed.path.startswith("/10.6084"):
host = "figshare"
        # urlparse() never includes a trailing slash in netloc
        elif parsed.netloc == "www.ncbi.nlm.nih.gov":
host = "PubMed Central"
else:
host = parsed.netloc
return host
def to_dict(self):
attributes_to_ignore = [
"rows",
"dedup_key"
]
ret = dict_from_dir(self, attributes_to_ignore)
return ret
|
|
from datetime import datetime
from datetime import timezone
import pytest
import responses
import chwrapper
class TestSearch:
    """Tests for chwrapper.Search requests (mocked responses include rate-limit headers)."""
    current_timestamp = int(datetime.now(timezone.utc).timestamp())
s = chwrapper.Search(access_token="pk.test")
    with open("tests/results.json") as results_file:
        results = results_file.read()
@responses.activate
def test_company_search(self):
"Searching by company name works"
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/search/companies?"
+ "access_token=pk.test&q=Python",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.search_companies("Python")
assert res.status_code == 200
assert sorted(res.json().keys()) == [
"items",
"items_per_page",
"kind",
"page_number",
"start_index",
"total_results",
]
assert sorted(res.json()["items"][0].keys()) == [
"address",
"company_number",
"company_status",
"company_type",
"date_of_cessation",
"date_of_creation",
"description",
"description_identifier",
"kind",
"links",
"matches",
"snippet",
"title",
]
@responses.activate
def test_officer_appointments(self):
"""Searching for appointments by officer ID works."""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/officers/12345/"
+ "appointments?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.appointments("12345")
assert res.status_code == 200
@responses.activate
def test_officer_search(self):
"""Searching by officer name works."""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/search/officers?"
+ "access_token=pk.test&q=John",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = chwrapper.Search(access_token="pk.test").search_officers("John")
assert res.status_code == 200
@responses.activate
def test_disqualified_officer_search(self):
"""Searching for disqualified officer by name works."""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/search/disqualified-officers?"
+ "access_token=pk.test&q=John",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.search_officers("John", disqualified=True)
assert res.status_code == 200
@responses.activate
def test_company_profile(self):
"""Getting a company profile works"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/12345?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.profile("12345")
assert res.status_code == 200
@responses.activate
def test_search_officers(self):
"""Searching for officers by company number works"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/12345/officers?"
+ "access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = chwrapper.Search(access_token="pk.test").officers("12345")
assert res.status_code == 200
@responses.activate
def test_filing_history(self):
"""Searching for filing history works"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/12345/"
+ "filing-history?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.filing_history("12345")
assert res.status_code == 200
@responses.activate
def test_filing_transaction(self):
"""Searching for a specific filing transaction works"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/12345/"
+ "filing-history/6789jhefD?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.filing_history("12345", transaction="6789jhefD")
assert res.status_code == 200
@responses.activate
def test_insolvency(self):
"""Searching for an insolvency works"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/12345/"
+ "insolvency?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.insolvency("12345")
assert res.status_code == 200
@responses.activate
def test_charges(self):
"""Searching for a charge works"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/"
+ "12345/charges?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.charges("12345")
assert res.status_code == 200
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/"
+ "12345/charges/6789jhefD?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res_charge = self.s.charges("12345", charge_id="6789jhefD")
assert res_charge.status_code == 200
@responses.activate
def test_registered_office(self):
"""Searching for a company's registered address works"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/company/12345/"
+ "registered-office-address?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.address("12345")
assert res.status_code == 200
@responses.activate
def test_disqualified_natural(self):
"""Get disqualified natural officers"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/disqualified-officers/natural/"
+ "1234?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.disqualified("1234")
assert res.status_code == 200
@responses.activate
def test_disqualified_corporate(self):
"""Get disqualified corporate officers"""
responses.add(
responses.GET,
"https://api.companieshouse.gov.uk/disqualified-officers/corporate/"
+ "1234?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.disqualified("1234", natural=False)
assert res.status_code == 200
@responses.activate
def test_getting_document(self):
"""Test for the document requesting method"""
responses.add(
responses.GET,
"https://document-api.companieshouse.gov.uk/document/"
+ "1234/content?access_token=pk.test",
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = chwrapper.Search(access_token="pk.test").document("1234")
assert res.status_code == 200
class TestSignificantControl:
"""Test the significant control endpoints"""
current_timestamp = int(datetime.timestamp(datetime.now(timezone.utc)))
s = chwrapper.Search(access_token="pk.test")
    with open("tests/results.json") as results_file:
        results = results_file.read()
items = [
"items",
"items_per_page",
"kind",
"page_number",
"start_index",
"total_results",
]
@responses.activate
def test_list_persons_significant_control(self):
"""Test the list of persons of significant control for a company"""
responses.add(
responses.GET,
(
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control?access_token=pk.test"
),
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.persons_significant_control("12345")
assert res.status_code == 200
assert sorted(res.json().keys()) == self.items
@responses.activate
def test_list_persons_significant_control_no_company_number(self):
"""Tests that correct exception raised when no company number used"""
with pytest.raises(TypeError):
_ = self.s.persons_significant_control() # pylint: disable=E1120
@responses.activate
def test_persons_significant_control_statements_true(self):
"""Test list of persons with significant control statements for a company"""
responses.add(
responses.GET,
(
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control-statements?access_token=pk.test"
),
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.persons_significant_control("12345", statements=True)
assert res.status_code == 200
assert sorted(res.json().keys()) == self.items
@responses.activate
def test_persons_significant_control_statements(self):
"""Test list of persons with significant control statements for a
company when set statements set to False"""
responses.add(
responses.GET,
(
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control?access_token=pk.test"
),
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.persons_significant_control("12345", statements=False)
assert res.status_code == 200
assert sorted(res.json().keys()) == self.items
assert res.url == (
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control?"
+ "access_token=pk.test"
)
@responses.activate
def test_person_significant_control(self):
"""Test single person of significant control for a company"""
responses.add(
responses.GET,
(
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control/individual/12345?access_token=pk.test"
),
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.significant_control("12345", "12345")
assert res.status_code == 200
assert sorted(res.json().keys()) == self.items
@responses.activate
def test_person_significant_control_no_company_number(self):
"""Tests that correct exception raised when no company number used"""
with pytest.raises(TypeError):
_ = self.s.significant_control() # pylint: disable=E1120
@responses.activate
def test_person_significant_control_wrong_entity_string(self):
"""Tests that correct exception raised when wrong entity string used"""
with pytest.raises(Exception):
_ = self.s.significant_control("12345", "12345", entity_type="hello")
@responses.activate
def test_legal_persons_significant_control(self):
"""Test legal person of significant control for a company endpoint"""
responses.add(
responses.GET,
(
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control/legal-person/12345"
+ "?access_token=pk.test"
),
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.significant_control("12345", "12345", "legal")
assert res.status_code == 200
assert sorted(res.json().keys()) == self.items
@responses.activate
def test_secure_persons_significant_control(self):
"""Test single secure person of significant control for a company"""
responses.add(
responses.GET,
(
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control/super-secure/12345?"
+ "access_token=pk.test"
),
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.significant_control("12345", "12345", "secure")
assert res.status_code == 200
assert sorted(res.json().keys()) == self.items
@responses.activate
def test_corporates_significant_control(self):
"""Test single corporate entity with significant control for a company"""
responses.add(
responses.GET,
(
"https://api.companieshouse.gov.uk/company/12345/"
+ "persons-with-significant-control/corporate-entity/12345?"
+ "access_token=pk.test"
),
match_querystring=True,
status=200,
body=self.results,
content_type="application/json",
adding_headers={"X-Ratelimit-Remain": "10", "X-Ratelimit-Reset": "{}".format(self.current_timestamp)},
)
res = self.s.significant_control("12345", "12345", "corporate")
assert res.status_code == 200
assert sorted(res.json().keys()) == self.items
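# Illustrative sketch (not part of the original test suite): every responses.add()
# call above repeats the same status, body, content type and rate-limit headers,
# so a helper along these lines could remove most of the boilerplate. Only the
# URL, body and timestamp vary per test.
def add_mock_response(url, body, timestamp, status=200):
    """Register a mocked Companies House GET endpoint with rate-limit headers."""
    responses.add(
        responses.GET,
        url,
        match_querystring=True,
        status=status,
        body=body,
        content_type="application/json",
        adding_headers={
            "X-Ratelimit-Remain": "10",
            "X-Ratelimit-Reset": "{}".format(timestamp),
        },
    )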
|
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
""" IMPORTS """
from typing import Dict, Callable, Optional, Any
from collections import OrderedDict
import traceback
import requests
from sixgill.sixgill_request_classes.sixgill_auth_request import SixgillAuthRequest
from sixgill.sixgill_feed_client import SixgillFeedClient
from sixgill.sixgill_constants import FeedStream
from sixgill.sixgill_utils import is_indicator
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
""" CONSTANTS """
INTEGRATION_NAME = "Sixgil_DVE_Feed"
CHANNEL_CODE = "7698e8287dfde53dcd13082be750a85a"
MAX_INDICATORS = 1000
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
SUSPICIOUS_FEED_IDS = ["darkfeed_003"]
DEMISTO_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
VERIFY = not demisto.params().get("insecure", True)
SESSION = requests.Session()
DESCRIPTION_FIELD_ORDER = OrderedDict(
[
("Description", "eventdescription"),
("Created", "creationdate"),
("Modified", "modified"),
("External id", "externalid"),
("Sixgill DVE score - current", "sixgilldvescorecurrent"),
("Sixgill DVE score - highest ever date", "sixgilldvescorehighesteverdate"),
("Sixgill DVE score - highest ever", "sixgilldvescorehighestever"),
("Sixgill - Previously exploited probability", "sixgillpreviouslyexploitedprobability"),
("Event Name", "eventname"),
("Event Type", "eventtype"),
("Event Action", "eventaction"),
("Previous level", "previouslevel"),
("Event Description", "eventdescription"),
("Event Datetime", "eventdatetime"),
("CVSS 3.1 score", "cvss31score"),
("CVSS 3.1 severity", "cvss31severity"),
("NVD Link", "nvdlink"),
("NVD - last modified date", "nvdlastmodifieddate"),
("NVD - publication date", "nvdpublicationdate"),
("CVSS 2.0 score", "cvss20score"),
("CVSS 2.0 severity", "cvss20severity"),
("NVD Vector - V2.0", "nvdvectorv20"),
("NVD Vector - V3.1", "nvdvectorv31"),
]
)
""" HELPER FUNCTIONS """
def module_command_test(*args):
"""
Performs basic Auth request
"""
response = SESSION.send(
request=SixgillAuthRequest(
demisto.params()["client_id"], demisto.params()["client_secret"], CHANNEL_CODE
).prepare(),
verify=VERIFY,
)
if not response.ok:
raise DemistoException("Auth request failed - please verify client_id, and client_secret.")
return "ok", None, "ok"
def get_description(fields_obj):
    description_string = ""
    for name, sixgill_name in DESCRIPTION_FIELD_ORDER.items():
        description_string += f"{name}: {fields_obj.get(sixgill_name)}\n"
    fields_obj["description"] = description_string
    return fields_obj
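# Illustrative sketch (not part of the integration): get_description walks
# DESCRIPTION_FIELD_ORDER in order, so the rendered description always lists
# the same labels top to bottom; keys missing from the fields dict render as
# "None". The sample values below are made up.
def _example_description():
    fields = {"eventdescription": "Sample CVE event", "externalid": "CVE-2021-0001"}
    described = get_description(dict(fields))
    # described["description"] begins with:
    #   Description: Sample CVE event
    #   Created: None
    #   ...
    return described["description"]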
def create_fields(stix_obj, event_obj, nvd_obj, score_obj, ext_id):
fields = {}
try:
fields = {
"description": "",
"creationdate": stix_obj.get("created", ""),
"modified": stix_obj.get("modified", ""),
"externalid": ext_id,
"sixgilldvescorecurrent": score_obj.get("current", ""),
"sixgilldvescorehighesteverdate": score_obj.get("highest", {}).get("date", ""),
"sixgilldvescorehighestever": score_obj.get("highest", {}).get("value", ""),
"sixgillpreviouslyexploitedprobability": score_obj.get("previouslyExploited", ""),
"eventname": event_obj.get("name", ""),
"eventtype": event_obj.get("type", ""),
"eventaction": event_obj.get("action", ""),
"previouslevel": event_obj.get("prev_level", ""),
"eventdescription": event_obj.get("description", ""),
"eventdatetime": event_obj.get("event_datetime", ""),
"cvss31score": nvd_obj.get("base_score_v3", ""),
"cvss31severity": nvd_obj.get("base_severity_v3", ""),
"nvdlink": nvd_obj.get("link", ""),
"nvdlastmodifieddate": nvd_obj.get("modified", ""),
"nvdpublicationdate": nvd_obj.get("published", ""),
"cvss20score": nvd_obj.get("score_2_0", ""),
"cvss20severity": nvd_obj.get("severity_2_0", ""),
"nvdvectorv20": nvd_obj.get("vector_v2", ""),
"nvdvectorv31": nvd_obj.get("vector_v3", ""),
}
except Exception as err:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{err}]\nTrace:\n{traceback.format_exc()}'
raise DemistoException(err_msg)
return fields
def stix_to_indicator(stix_obj, tags: list = [], tlp_color: Optional[str] = None):
indicator: Dict[str, Any] = {}
try:
ext_obj = stix_obj.get("external_references", [])
ext_id = ""
if ext_obj and ext_obj[0]:
ext_id = ext_obj[0].get("external_id")
event_obj = stix_obj.get("x_sixgill_info", {}).get("event", {})
nvd_obj = stix_obj.get("x_sixgill_info", {}).get("nvd", {})
score_obj = stix_obj.get("x_sixgill_info", {}).get("score", {})
fields = create_fields(stix_obj, event_obj, nvd_obj, score_obj, ext_id)
fields = get_description(fields)
indicator["value"] = ext_id
indicator["type"] = "CVE"
indicator["rawJSON"] = {"value": ext_id, "type": "CVE"}
indicator["rawJSON"].update(stix_obj)
indicator["score"] = "3"
indicator["fields"] = fields
if tlp_color:
indicator["fields"]["trafficlightprotocol"] = str(tlp_color)
if tags:
indicator["fields"]["tags"] = ",".join(list(set(tags)))
except Exception as err:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{err}]\nTrace:\n{traceback.format_exc()}'
raise DemistoException(err_msg)
return indicator
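# Illustrative sketch (not part of the integration): a minimal STIX-like object
# and the indicator stix_to_indicator builds from it. Real feed objects carry
# many more attributes; only the keys read by the helpers above are shown, and
# the values are made up.
def _example_stix_to_indicator():
    stix_obj = {
        "created": "2021-01-01T00:00:00Z",
        "modified": "2021-01-02T00:00:00Z",
        "external_references": [{"external_id": "CVE-2021-0001"}],
        "x_sixgill_info": {
            "event": {"name": "sample event"},
            "nvd": {"base_score_v3": "9.8"},
            "score": {"current": 8.5, "highest": {"date": "2021-01-01", "value": 9.0}},
        },
    }
    indicator = stix_to_indicator(stix_obj, tags=["sixgill"], tlp_color="AMBER")
    # indicator["value"] == "CVE-2021-0001", indicator["type"] == "CVE", and
    # indicator["fields"] carries the flattened DVE score / NVD columns plus
    # the tags and TLP colour set above.
    return indicator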
def fetch_indicators_command(
client, limit: int = 0, get_indicators_mode: bool = False, tags: list = [], tlp_color: Optional[str] = None
):
indicators_list = []
try:
records = client.get_bundle()
records = records.get("objects", [])
for rec in records:
if is_indicator(rec):
# if not rec.get("type", "") == "marking-definition":
ind = stix_to_indicator(rec, tags, tlp_color)
indicators_list.append(ind)
if get_indicators_mode and len(indicators_list) == limit:
break
if not get_indicators_mode:
client.commit_indicators()
except Exception as err:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{err}]\nTrace:\n{traceback.format_exc()}'
raise DemistoException(err_msg)
return indicators_list
def get_indicators_command(client, args):
limit = int(args.get("limit"))
final_indicators = fetch_indicators_command(client, limit, True)
human_readable = tableToMarkdown("Indicators from Sixgill DVE Feed:", final_indicators)
return human_readable, {}, final_indicators
def get_limit(str_limit, default_limit):
try:
return int(str_limit)
except Exception:
return default_limit
def main():
max_indicators = get_limit(demisto.params().get("maxIndicators", MAX_INDICATORS), MAX_INDICATORS)
SESSION.proxies = handle_proxy()
client = SixgillFeedClient(
demisto.params()["client_id"],
demisto.params()["client_secret"],
CHANNEL_CODE,
FeedStream.DVEFEED,
bulk_size=max_indicators,
session=SESSION,
verify=VERIFY
)
command = demisto.command()
demisto.info(f"Command being called is {command}")
tags = argToList(demisto.params().get("feedTags", []))
tlp_color = demisto.params().get("tlp_color")
commands: Dict[str, Callable] = {"test-module": module_command_test, "cybersixgill-get-indicators": get_indicators_command}
try:
if demisto.command() == "fetch-indicators":
indicators = fetch_indicators_command(client, tags=tags, tlp_color=tlp_color)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as err:
demisto.error(traceback.format_exc())
return_error(f"Error failed to execute {demisto.command()}, error: [{err}]")
if __name__ == "__builtin__" or __name__ == "builtins":
main()
|
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible module for zabbix actions
'''
#
# Zabbix action ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because the zabbix modules all look similar to each other.
# They share near-duplicate code because their behavior is very similar,
# differing only slightly for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError
CUSTOM_SCRIPT_ACTION = '0'
IPMI_ACTION = '1'
SSH_ACTION = '2'
TELNET_ACTION = '3'
GLOBAL_SCRIPT_ACTION = '4'
EXECUTE_ON_ZABBIX_AGENT = '0'
EXECUTE_ON_ZABBIX_SERVER = '1'
OPERATION_REMOTE_COMMAND = '1'
def exists(content, key='result'):
    ''' Check whether key exists in content and content[key] is non-empty
    '''
    if key not in content:
        return False
    if not content[key]:
        return False
    return True
def conditions_equal(zab_conditions, user_conditions):
'''Compare two lists of conditions'''
c_type = 'conditiontype'
_op = 'operator'
val = 'value'
if len(user_conditions) != len(zab_conditions):
return False
for zab_cond, user_cond in zip(zab_conditions, user_conditions):
if zab_cond[c_type] != str(user_cond[c_type]) or zab_cond[_op] != str(user_cond[_op]) or \
zab_cond[val] != str(user_cond[val]):
return False
return True
def filter_differences(zabbix_filters, user_filters):
'''Determine the differences from user and zabbix for operations'''
rval = {}
for key, val in user_filters.items():
if key == 'conditions':
if not conditions_equal(zabbix_filters[key], val):
rval[key] = val
elif zabbix_filters[key] != str(val):
rval[key] = val
return rval
def opconditions_diff(zab_val, user_val):
''' Report whether there are differences between opconditions on
zabbix and opconditions supplied by user '''
if len(zab_val) != len(user_val):
return True
for z_cond, u_cond in zip(zab_val, user_val):
if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
['conditiontype', 'operator', 'value']]):
return True
return False
def opmessage_diff(zab_val, user_val):
''' Report whether there are differences between opmessage on
zabbix and opmessage supplied by user '''
for op_msg_key, op_msg_val in user_val.items():
if zab_val[op_msg_key] != str(op_msg_val):
return True
return False
def opmessage_grp_diff(zab_val, user_val):
''' Report whether there are differences between opmessage_grp
on zabbix and opmessage_grp supplied by user '''
zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val])
usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val])
if usr_grp_ids != zab_grp_ids:
return True
return False
def opmessage_usr_diff(zab_val, user_val):
''' Report whether there are differences between opmessage_usr
on zabbix and opmessage_usr supplied by user '''
zab_usr_ids = set([usr['usrid'] for usr in zab_val])
usr_ids = set([usr['usrid'] for usr in user_val])
if usr_ids != zab_usr_ids:
return True
return False
def opcommand_diff(zab_op_cmd, usr_op_cmd):
''' Check whether user-provided opcommand matches what's already
stored in Zabbix '''
for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
return True
return False
def host_in_zabbix(zab_hosts, usr_host):
''' Check whether a particular user host is already in the
Zabbix list of hosts '''
for usr_hst_key, usr_hst_val in usr_host.items():
for zab_host in zab_hosts:
if usr_hst_key in zab_host and \
zab_host[usr_hst_key] == str(usr_hst_val):
return True
return False
def hostlist_in_zabbix(zab_hosts, usr_hosts):
''' Check whether user-provided list of hosts are already in
the Zabbix action '''
if len(zab_hosts) != len(usr_hosts):
return False
for usr_host in usr_hosts:
if not host_in_zabbix(zab_hosts, usr_host):
return False
return True
# We are comparing two lists of dictionaries (the one stored on zabbix and the
# one the user is providing). For each type of operation, determine whether there
# is a difference between what is stored on zabbix and what the user is providing.
# If there is a difference, we take the user-provided data for what needs to
# be stored/updated into zabbix.
def operation_differences(zabbix_ops, user_ops):
'''Determine the differences from user and zabbix for operations'''
# if they don't match, take the user options
if len(zabbix_ops) != len(user_ops):
return user_ops
rval = {}
for zab, user in zip(zabbix_ops, user_ops):
for oper in user.keys():
if oper == 'opconditions' and opconditions_diff(zab[oper], \
user[oper]):
rval[oper] = user[oper]
elif oper == 'opmessage' and opmessage_diff(zab[oper], \
user[oper]):
rval[oper] = user[oper]
elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \
user[oper]):
rval[oper] = user[oper]
elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \
user[oper]):
rval[oper] = user[oper]
elif oper == 'opcommand' and opcommand_diff(zab[oper], \
user[oper]):
rval[oper] = user[oper]
# opcommand_grp can be treated just like opcommand_hst
# as opcommand_grp[] is just a list of groups
elif oper == 'opcommand_hst' or oper == 'opcommand_grp':
if not hostlist_in_zabbix(zab[oper], user[oper]):
rval[oper] = user[oper]
# if it's any other type of operation than the ones tested above
# just do a direct compare
elif oper not in ['opconditions', 'opmessage', 'opmessage_grp',
'opmessage_usr', 'opcommand', 'opcommand_hst',
'opcommand_grp'] \
and str(zab[oper]) != str(user[oper]):
rval[oper] = user[oper]
return rval
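# Illustrative sketch (not part of the module): operation_differences only
# reports the keys whose user-supplied value differs from what Zabbix already
# stores, comparing values as strings the way the Zabbix API returns them.
def _example_operation_differences():
    zabbix_ops = [{'operationtype': '0', 'esc_period': '60'}]
    user_ops = [{'operationtype': 0, 'esc_period': 120}]
    # esc_period differs ('60' vs '120'); operationtype does not ('0' == str(0))
    return operation_differences(zabbix_ops, user_ops)  # {'esc_period': 120}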
def get_users(zapi, users):
    '''get user ids from a list of user aliases'''
rval_users = []
for user in users:
content = zapi.get_content('user',
'get',
{'filter': {'alias': user}})
rval_users.append({'userid': content['result'][0]['userid']})
return rval_users
def get_user_groups(zapi, groups):
    '''get user group ids from a list of user group names'''
user_groups = []
content = zapi.get_content('usergroup',
'get',
{'search': {'name': groups}})
for usr_grp in content['result']:
user_groups.append({'usrgrpid': usr_grp['usrgrpid']})
return user_groups
def get_mediatype_id_by_name(zapi, m_name):
'''get the mediatype id from the mediatype name'''
content = zapi.get_content('mediatype',
'get',
{'filter': {'description': m_name}})
return content['result'][0]['mediatypeid']
def get_priority(priority):
''' determine priority
'''
prior = 0
if 'info' in priority:
prior = 1
elif 'warn' in priority:
prior = 2
elif 'avg' == priority or 'ave' in priority:
prior = 3
elif 'high' in priority:
prior = 4
elif 'dis' in priority:
prior = 5
return prior
def get_event_source(from_src):
    '''Translate event source string into its numeric value'''
choices = ['trigger', 'discovery', 'auto', 'internal']
rval = 0
try:
rval = choices.index(from_src)
    except ValueError:
        raise ZabbixAPIError('Value not found for event source [%s]' % from_src)
return rval
def get_status(inc_status):
'''determine status for action'''
rval = 1
if inc_status == 'enabled':
rval = 0
return rval
def get_condition_operator(inc_operator):
''' determine the condition operator'''
vals = {'=': 0,
'<>': 1,
'like': 2,
'not like': 3,
'in': 4,
'>=': 5,
'<=': 6,
'not in': 7,
}
return vals[inc_operator]
def get_host_id_by_name(zapi, host_name):
'''Get host id by name'''
content = zapi.get_content('host',
'get',
{'filter': {'name': host_name}})
return content['result'][0]['hostid']
def get_trigger_value(inc_trigger):
'''determine the proper trigger value'''
rval = 1
if inc_trigger == 'PROBLEM':
rval = 1
else:
rval = 0
return rval
def get_template_id_by_name(zapi, t_name):
'''get the template id by name'''
content = zapi.get_content('template',
'get',
{'filter': {'host': t_name}})
return content['result'][0]['templateid']
def get_host_group_id_by_name(zapi, hg_name):
'''Get hostgroup id by name'''
content = zapi.get_content('hostgroup',
'get',
{'filter': {'name': hg_name}})
return content['result'][0]['groupid']
def get_condition_type(event_source, inc_condition):
'''determine the condition type'''
c_types = {}
if event_source == 'trigger':
c_types = {'host group': 0,
'host': 1,
'trigger': 2,
'trigger name': 3,
'trigger severity': 4,
'trigger value': 5,
'time period': 6,
'host template': 13,
'application': 15,
'maintenance status': 16,
}
elif event_source == 'discovery':
c_types = {'host IP': 7,
'discovered service type': 8,
'discovered service port': 9,
'discovery status': 10,
'uptime or downtime duration': 11,
'received value': 12,
'discovery rule': 18,
'discovery check': 19,
'proxy': 20,
'discovery object': 21,
}
elif event_source == 'auto':
c_types = {'proxy': 20,
'host name': 22,
'host metadata': 24,
}
elif event_source == 'internal':
c_types = {'host group': 0,
'host': 1,
'host template': 13,
'application': 15,
'event type': 23,
}
else:
        raise ZabbixAPIError('Unknown event source %s' % event_source)
return c_types[inc_condition]
def get_operation_type(inc_operation):
''' determine the correct operation type'''
o_types = {'send message': 0,
'remote command': OPERATION_REMOTE_COMMAND,
'add host': 2,
'remove host': 3,
'add to host group': 4,
'remove from host group': 5,
'link to template': 6,
'unlink from template': 7,
'enable host': 8,
'disable host': 9,
}
return o_types[inc_operation]
def get_opcommand_type(opcommand_type):
''' determine the opcommand type '''
oc_types = {'custom script': CUSTOM_SCRIPT_ACTION,
'IPMI': IPMI_ACTION,
'SSH': SSH_ACTION,
'Telnet': TELNET_ACTION,
'global script': GLOBAL_SCRIPT_ACTION,
}
return oc_types[opcommand_type]
def get_execute_on(execute_on):
''' determine the execution target '''
e_types = {'zabbix agent': EXECUTE_ON_ZABBIX_AGENT,
'zabbix server': EXECUTE_ON_ZABBIX_SERVER,
}
return e_types[execute_on]
def action_remote_command(ansible_module, zapi, operation):
''' Process remote command type of actions '''
if 'type' not in operation['opcommand']:
ansible_module.exit_json(failed=True, changed=False, state='unknown',
results="No Operation Type provided")
operation['opcommand']['type'] = get_opcommand_type(operation['opcommand']['type'])
if operation['opcommand']['type'] == CUSTOM_SCRIPT_ACTION:
if 'execute_on' in operation['opcommand']:
operation['opcommand']['execute_on'] = get_execute_on(operation['opcommand']['execute_on'])
# custom script still requires the target hosts/groups to be set
operation['opcommand_hst'] = []
operation['opcommand_grp'] = []
for usr_host in operation['target_hosts']:
if usr_host['target_type'] == 'zabbix server':
# 0 = target host local/current host
operation['opcommand_hst'].append({'hostid': 0})
elif usr_host['target_type'] == 'group':
group_name = usr_host['target']
gid = get_host_group_id_by_name(zapi, group_name)
operation['opcommand_grp'].append({'groupid': gid})
elif usr_host['target_type'] == 'host':
host_name = usr_host['target']
hid = get_host_id_by_name(zapi, host_name)
operation['opcommand_hst'].append({'hostid': hid})
# 'target_hosts' is just to make it easier to build zbx_actions
# not part of ZabbixAPI
del operation['target_hosts']
else:
ansible_module.exit_json(failed=True, changed=False, state='unknown',
results="Unsupported remote command type")
def get_action_operations(ansible_module, zapi, inc_operations):
'''Convert the operations into syntax for api'''
for operation in inc_operations:
operation['operationtype'] = get_operation_type(operation['operationtype'])
        if operation['operationtype'] == 0:  # send message: resolve media type, group and user names to ids
operation['opmessage']['mediatypeid'] = \
get_mediatype_id_by_name(zapi, operation['opmessage']['mediatypeid'])
operation['opmessage_grp'] = get_user_groups(zapi, operation.get('opmessage_grp', []))
operation['opmessage_usr'] = get_users(zapi, operation.get('opmessage_usr', []))
if operation['opmessage']['default_msg']:
operation['opmessage']['default_msg'] = 1
else:
operation['opmessage']['default_msg'] = 0
elif operation['operationtype'] == OPERATION_REMOTE_COMMAND:
action_remote_command(ansible_module, zapi, operation)
# Handle Operation conditions:
# Currently there is only 1 available which
# is 'event acknowledged'. In the future
# if there are any added we will need to pass this
# option to a function and return the correct conditiontype
        if 'opconditions' in operation:
for condition in operation['opconditions']:
if condition['conditiontype'] == 'event acknowledged':
condition['conditiontype'] = 14
if condition['operator'] == '=':
condition['operator'] = 0
if condition['value'] == 'acknowledged':
condition['value'] = 1
else:
condition['value'] = 0
return inc_operations
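# Illustrative sketch (not part of the module): for operation types that need no
# API lookups (anything other than "send message" or "remote command"),
# get_action_operations only normalizes the operation type and any
# "event acknowledged" opconditions, so it can be exercised without a Zabbix
# connection by passing None for the module and zapi arguments.
def _example_opcondition_translation():
    ops = [{'operationtype': 'add host',
            'opconditions': [{'conditiontype': 'event acknowledged',
                              'operator': '=',
                              'value': 'acknowledged'}]}]
    converted = get_action_operations(None, None, ops)
    # converted[0]['operationtype'] == 2 and the opcondition becomes
    # {'conditiontype': 14, 'operator': 0, 'value': 1}
    return converted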
def get_operation_evaltype(inc_type):
'''get the operation evaltype'''
rval = 0
if inc_type == 'and/or':
rval = 0
elif inc_type == 'and':
rval = 1
elif inc_type == 'or':
rval = 2
elif inc_type == 'custom':
rval = 3
return rval
def get_action_conditions(zapi, event_source, inc_conditions):
'''Convert the conditions into syntax for api'''
calc_type = inc_conditions.pop('calculation_type')
inc_conditions['evaltype'] = get_operation_evaltype(calc_type)
for cond in inc_conditions['conditions']:
cond['operator'] = get_condition_operator(cond['operator'])
# Based on conditiontype we need to set the proper value
# e.g. conditiontype = hostgroup then the value needs to be a hostgroup id
# e.g. conditiontype = host the value needs to be a host id
cond['conditiontype'] = get_condition_type(event_source, cond['conditiontype'])
if cond['conditiontype'] == 0:
cond['value'] = get_host_group_id_by_name(zapi, cond['value'])
elif cond['conditiontype'] == 1:
cond['value'] = get_host_id_by_name(zapi, cond['value'])
elif cond['conditiontype'] == 4:
cond['value'] = get_priority(cond['value'])
elif cond['conditiontype'] == 5:
cond['value'] = get_trigger_value(cond['value'])
elif cond['conditiontype'] == 13:
cond['value'] = get_template_id_by_name(zapi, cond['value'])
elif cond['conditiontype'] == 16:
cond['value'] = ''
return inc_conditions
def get_send_recovery(send_recovery):
'''Get the integer value'''
rval = 0
if send_recovery:
rval = 1
return rval
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
'''
ansible zabbix module for zbx_item
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
event_source=dict(default='trigger', choices=['trigger', 'discovery', 'auto', 'internal'], type='str'),
action_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
action_message=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}\r\n" +
"Last value: {ITEM.LASTVALUE}\r\n\r\n{TRIGGER.URL}", type='str'),
reply_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
reply_message=dict(default="Trigger: {TRIGGER.NAME}\r\nTrigger status: {TRIGGER.STATUS}\r\n" +
"Trigger severity: {TRIGGER.SEVERITY}\r\nTrigger URL: {TRIGGER.URL}\r\n\r\n" +
"Item values:\r\n\r\n1. {ITEM.NAME1} ({HOST.NAME1}:{ITEM.KEY1}): " +
"{ITEM.VALUE1}\r\n2. {ITEM.NAME2} ({HOST.NAME2}:{ITEM.KEY2}): " +
"{ITEM.VALUE2}\r\n3. {ITEM.NAME3} ({HOST.NAME3}:{ITEM.KEY3}): " +
"{ITEM.VALUE3}", type='str'),
send_recovery=dict(default=False, type='bool'),
status=dict(default=None, type='str'),
escalation_time=dict(default=60, type='int'),
conditions_filter=dict(default=None, type='dict'),
operations=dict(default=None, type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'action'
state = module.params['state']
content = zapi.get_content(zbx_class_name,
'get',
{'search': {'name': module.params['name']},
'selectFilter': 'extend',
'selectOperations': 'extend',
})
#******#
# GET
#******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
#******#
# DELETE
#******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['actionid']])
module.exit_json(changed=True, results=content['result'], state="absent")
# Create and Update
if state == 'present':
conditions = get_action_conditions(zapi, module.params['event_source'], module.params['conditions_filter'])
operations = get_action_operations(module, zapi,
module.params['operations'])
params = {'name': module.params['name'],
'esc_period': module.params['escalation_time'],
'eventsource': get_event_source(module.params['event_source']),
'status': get_status(module.params['status']),
'def_shortdata': module.params['action_subject'],
'def_longdata': module.params['action_message'],
'r_shortdata': module.params['reply_subject'],
'r_longdata': module.params['reply_message'],
'recovery_msg': get_send_recovery(module.params['send_recovery']),
'filter': conditions,
'operations': operations,
}
# Remove any None valued params
        _ = [params.pop(key, None) for key in list(params.keys()) if params[key] is None]
#******#
# CREATE
#******#
if not exists(content):
content = zapi.get_content(zbx_class_name, 'create', params)
            if 'error' in content:
module.exit_json(failed=True, changed=True, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state='present')
########
# UPDATE
########
_ = params.pop('hostid', None)
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
if key == 'operations':
ops = operation_differences(zab_results[key], value)
if ops:
differences[key] = ops
elif key == 'filter':
filters = filter_differences(zab_results[key], value)
if filters:
differences[key] = filters
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update.
# action update requires an id, filters, and operations
differences['actionid'] = zab_results['actionid']
differences['operations'] = params['operations']
differences['filter'] = params['filter']
content = zapi.get_content(zbx_class_name, 'update', differences)
        if 'error' in content:
module.exit_json(failed=True, changed=False, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lockfile
import logging
import os
import socket
import traceback
from deployd.client.base_client import BaseClient
from deployd.client.restfulclient import RestfulClient
from deployd.common.decorators import retry
from deployd.common.stats import create_stats_timer, create_sc_increment
from deployd.common import utils
from deployd.types.ping_request import PingRequest
from deployd import IS_PINTEREST
log = logging.getLogger(__name__)
class Client(BaseClient):
def __init__(self, config=None, hostname=None, ip=None, hostgroup=None,
host_id=None, use_facter=None, use_host_info=False):
self._hostname = hostname
self._ip = ip
self._hostgroup = hostgroup
self._id = host_id
self._config = config
self._use_facter = use_facter
self._use_host_info = use_host_info
self._agent_version = self._config.get_deploy_agent_version()
self._autoscaling_group = None
self._availability_zone = None
self._stage_type = None
def _read_host_info(self):
if self._use_facter:
log.info("Use facter to get host info")
name_key = self._config.get_facter_name_key()
ip_key = self._config.get_facter_ip_key()
id_key = self._config.get_facter_id_key()
group_key = self._config.get_facter_group_key()
keys_to_fetch = set()
# facter call is expensive so collect all keys to fetch first
if not self._hostname and name_key:
keys_to_fetch.add(name_key)
if not self._ip and ip_key:
keys_to_fetch.add(ip_key)
if not self._id and id_key:
keys_to_fetch.add(id_key)
if not self._hostgroup and group_key:
keys_to_fetch.add(group_key)
facter_data = utils.get_info_from_facter(keys_to_fetch)
if not self._hostname:
self._hostname = facter_data.get(name_key, None)
if not self._ip:
self._ip = facter_data.get(ip_key, None)
if not self._id:
self._id = facter_data.get(id_key, None)
if not self._hostgroup and group_key in facter_data:
hostgroup = facter_data[group_key]
if hostgroup is not None:
self._hostgroup = hostgroup.split(",")
else:
# read host_info file
host_info_fn = self._config.get_host_info_fn()
lock_fn = '{}.lock'.format(host_info_fn)
lock = lockfile.FileLock(lock_fn)
if os.path.exists(host_info_fn):
with lock, open(host_info_fn, "r+") as f:
host_info = dict((n.strip('\"\n\' ') for n in line.split("=", 1)) for line in f)
if not self._hostname and "hostname" in host_info:
self._hostname = host_info.get("hostname")
if not self._ip and "ip" in host_info:
self._ip = host_info.get("ip")
if not self._id and "id" in host_info:
self._id = host_info.get("id")
if not self._hostgroup:
host_group = host_info.get("groups", None)
if host_group:
self._hostgroup = host_group.split(",")
# Hosts brought up outside of ASG or Teletraan might not have ASG
if not self._autoscaling_group:
self._autoscaling_group = host_info.get("autoscaling-group", None)
if not self._availability_zone:
self._availability_zone = host_info.get("availability-zone", None)
if not self._stage_type:
self._stage_type = host_info.get("stage-type", None)
else:
log.warn("Cannot find host information file {}. See doc for more details".format(host_info_fn))
# patch missing part
if not self._hostname:
self._hostname = socket.gethostname()
if not self._id:
if self._use_facter:
#Must fail here as it cannot identify the host if id is missing
return False
else:
self._id = self._hostname
if not self._ip:
try:
self._ip = socket.gethostbyname(self._hostname)
except Exception:
log.warn('Host ip information does not exist.')
pass
if IS_PINTEREST and self._use_host_info is False:
# Read new keys from facter always
az_key = self._config.get_facter_az_key()
asg_tag_key = self._config.get_facter_asg_tag_key()
ec2_tags_key = self._config.get_facter_ec2_tags_key()
stage_type_key = self._config.get_stage_type_key()
keys_to_fetch = set()
if not self._availability_zone and az_key:
keys_to_fetch.add(az_key)
if not self._autoscaling_group:
keys_to_fetch.add(ec2_tags_key)
if not self._stage_type:
keys_to_fetch.add(stage_type_key)
facter_data = utils.get_info_from_facter(keys_to_fetch)
if not self._availability_zone:
self._availability_zone = facter_data.get(az_key, None)
# Hosts brought up outside of ASG or Teletraan might not have ASG
# Note: on U14, facter -p ec2_tags.Autoscaling does not work.
# so need to read ec2_tags from facter and parse Autoscaling tag to cover this case
if not self._autoscaling_group:
self._autoscaling_group = facter_data.get(ec2_tags_key, {}).get(asg_tag_key, None)
if not self._stage_type:
self._stage_type = facter_data.get(stage_type_key, None)
log.info("Host information is loaded. "
"Host name: {}, IP: {}, host id: {}, agent_version={}, autoscaling_group: {}, "
"availability_zone: {}, stage_type: {}, group: {}".format(self._hostname, self._ip, self._id,
self._agent_version, self._autoscaling_group, self._availability_zone, self._stage_type, self._hostgroup))
return True
def send_reports(self, env_reports=None):
try:
if self._read_host_info():
reports = [status.report for status in env_reports.values()]
for report in reports:
if isinstance(report.errorMessage, bytes):
report.errorMessage = report.errorMessage.decode('utf-8')
                    # We ignore non-ascii characters for now; we should solve this properly on
# the server side:
# https://app.asana.com/0/11815463290546/40714916594784
if report.errorMessage:
report.errorMessage = report.errorMessage.encode('ascii', 'ignore').decode()
ping_request = PingRequest(hostId=self._id, hostName=self._hostname, hostIp=self._ip,
groups=self._hostgroup, reports=reports,
agentVersion=self._agent_version,
autoscalingGroup=self._autoscaling_group,
availabilityZone=self._availability_zone,
stageType=self._stage_type)
with create_stats_timer('deploy.agent.request.latency',
tags={'host': self._hostname}):
ping_response = self.send_reports_internal(ping_request)
log.debug('%s -> %s' % (ping_request, ping_response))
return ping_response
else:
log.error("Fail to read host info")
create_sc_increment(name='deploy.failed.agent.hostinfocollection',
tags={'host': self._hostname})
except Exception:
log.error(traceback.format_exc())
create_sc_increment(name='deploy.failed.agent.requests',
tags={'host': self._hostname})
return None
@retry(ExceptionToCheck=Exception, delay=1, tries=3)
def send_reports_internal(self, request):
ping_service = RestfulClient(self._config)
response = ping_service.ping(request)
return response
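# Illustrative sketch (not part of the client): constructing a Client with a
# minimal stand-in config. The real deploy agent passes its own Config object;
# the hypothetical _FakeConfig below only provides the single method that
# Client.__init__ actually calls.
def _example_client_usage():
    class _FakeConfig(object):
        def get_deploy_agent_version(self):
            return "1.2.3"

    client = Client(config=_FakeConfig(), hostname="test-host", ip="10.0.0.1",
                    host_id="i-123456", use_host_info=True)
    # client.send_reports(env_reports) would then read host info, POST a
    # PingRequest and return the ping response, or None if reporting failed.
    return client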
|
|
# Copyright 2017: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import six
from glanceclient import exc as glance_exc
from novaclient import exceptions as nova_exc
from rally.task import types
from rally.common import logging
from rally.common import validation
from rally.common import yamlutils as yaml
from rally import consts
from rally import exceptions
from rally.plugins.common import validators
from rally.plugins.openstack.context.nova import flavors as flavors_ctx
from rally.plugins.openstack import types as openstack_types
LOG = logging.getLogger(__name__)
ValidationResult = validation.ValidationResult
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="image_exists", platform="openstack")
class ImageExistsValidator(validation.Validator):
def __init__(self, param_name, nullable):
"""Validator checks existed image or not
:param param_name: defines which variable should be used
to get image id value.
:param nullable: defines image id param is required
"""
super(ImageExistsValidator, self).__init__()
self.param_name = param_name
self.nullable = nullable
def validate(self, config, credentials, plugin_cls, plugin_cfg):
image_args = config.get("args", {}).get(self.param_name)
if not image_args and self.nullable:
return
image_context = config.get("context", {}).get("images", {})
image_ctx_name = image_context.get("image_name")
if not image_args:
message = ("Parameter %s is not specified.") % self.param_name
return self.fail(message)
if "image_name" in image_context:
# NOTE(rvasilets) check string is "exactly equal to" a regex
# or image name from context equal to image name from args
if "regex" in image_args:
match = re.match(image_args.get("regex"), image_ctx_name)
if image_ctx_name == image_args.get("name") or (
"regex" in image_args and match):
return
try:
for user in credentials["openstack"]["users"]:
clients = user.get("credential", {}).clients()
image_id = openstack_types.GlanceImage.transform(
clients=clients, resource_config=image_args)
clients.glance().images.get(image_id)
except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument):
message = ("Image '%s' not found") % image_args
return self.fail(message)
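# Illustrative sketch (not part of this module): the name/regex short-circuit
# used inside ImageExistsValidator.validate above. When the image name supplied
# by the "images" context matches the scenario args (exactly, or through the
# optional "regex" key), the Glance lookup is skipped entirely.
def _example_image_context_match(image_args, image_ctx_name):
    match = None
    if "regex" in image_args:
        match = re.match(image_args.get("regex"), image_ctx_name)
    return image_ctx_name == image_args.get("name") or (
        "regex" in image_args and match is not None)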
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="external_network_exists", platform="openstack")
class ExternalNetworkExistsValidator(validation.Validator):
def __init__(self, param_name):
"""Validator checks that external network with given name exists.
:param param_name: name of validated network
"""
super(ExternalNetworkExistsValidator, self).__init__()
self.param_name = param_name
def validate(self, config, credentials, plugin_cls, plugin_cfg):
ext_network = config.get("args", {}).get(self.param_name)
if not ext_network:
return
users = credentials["openstack"]["users"]
result = []
for user in users:
creds = user["credential"]
networks = creds.clients().neutron().list_networks()["networks"]
external_networks = [net["name"] for net in networks if
net.get("router:external", False)]
if ext_network not in external_networks:
message = ("External (floating) network with name {1} "
"not found by user {0}. "
"Available networks: {2}").format(creds.username,
ext_network,
networks)
result.append(message)
if result:
return self.fail(result)
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="required_neutron_extensions", platform="openstack")
class RequiredNeutronExtensionsValidator(validation.Validator):
def __init__(self, extensions, *args):
"""Validator checks if the specified Neutron extension is available
:param extensions: list of Neutron extensions
"""
super(RequiredNeutronExtensionsValidator, self).__init__()
if isinstance(extensions, (list, tuple)):
# services argument is a list, so it is a new way of validators
# usage, args in this case should not be provided
self.req_ext = extensions
if args:
LOG.warning("Positional argument is not what "
"'required_neutron_extensions' decorator expects. "
"Use `extensions` argument instead")
else:
# it is old way validator
self.req_ext = [extensions]
self.req_ext.extend(args)
def validate(self, config, credentials, plugin_cls, plugin_cfg):
clients = credentials["openstack"]["users"][0]["credential"].clients()
extensions = clients.neutron().list_extensions()["extensions"]
aliases = [x["alias"] for x in extensions]
for extension in self.req_ext:
if extension not in aliases:
msg = ("Neutron extension %s "
"is not configured") % extension
return self.fail(msg)
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="flavor_exists", platform="openstack")
class FlavorExistsValidator(validation.Validator):
def __init__(self, param_name):
"""Returns validator for flavor
:param param_name: defines which variable should be used
to get flavor id value.
"""
super(FlavorExistsValidator, self).__init__()
self.param_name = param_name
def _get_flavor_from_context(self, config, flavor_value):
if "flavors" not in config.get("context", {}):
raise exceptions.InvalidScenarioArgument("No flavors context")
flavors = [flavors_ctx.FlavorConfig(**f)
for f in config["context"]["flavors"]]
resource = types.obj_from_name(resource_config=flavor_value,
resources=flavors, typename="flavor")
flavor = flavors_ctx.FlavorConfig(**resource)
flavor.id = "<context flavor: %s>" % flavor.name
return flavor
def _get_validated_flavor(self, config, clients, param_name):
flavor_value = config.get("args", {}).get(param_name)
if not flavor_value:
msg = "Parameter %s is not specified." % param_name
return ValidationResult(False, msg), None
try:
flavor_id = openstack_types.Flavor.transform(
clients=clients, resource_config=flavor_value)
flavor = clients.nova().flavors.get(flavor=flavor_id)
return ValidationResult(True), flavor
except (nova_exc.NotFound, exceptions.InvalidScenarioArgument):
try:
return ValidationResult(True), self._get_flavor_from_context(
config, flavor_value)
except exceptions.InvalidScenarioArgument:
pass
message = "Flavor '%s' not found" % flavor_value
return ValidationResult(False, message), None
def validate(self, config, credentials, plugin_cls, plugin_cfg):
# flavors do not depend on user or tenant, so checking for one user
# should be enough
user = credentials["openstack"]["users"][0]
clients = user["credential"].clients()
return self._get_validated_flavor(config, clients, self.param_name)[0]
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="image_valid_on_flavor", platform="openstack")
class ImageValidOnFlavorValidator(FlavorExistsValidator):
def __init__(self, flavor_param, image_param,
fail_on_404_image=True, validate_disk=True):
"""Returns validator for image could be used for current flavor
:param flavor_param: defines which variable should be used
to get flavor id value.
:param image_param: defines which variable should be used
to get image id value.
:param validate_disk: flag to indicate whether to validate flavor's
disk. Should be True if instance is booted from
image. Should be False if instance is booted
from volume. Default value is True.
        :param fail_on_404_image: flag that indicates whether to fail
                                   when the requested image is not found.
"""
super(ImageValidOnFlavorValidator, self).__init__(flavor_param)
self.image_name = image_param
self.fail_on_404_image = fail_on_404_image
self.validate_disk = validate_disk
def _get_validated_image(self, config, clients, param_name):
image_context = config.get("context", {}).get("images", {})
image_args = config.get("args", {}).get(param_name)
image_ctx_name = image_context.get("image_name")
if not image_args:
msg = ("Parameter %s is not specified.") % param_name
return (ValidationResult(False, msg), None)
if "image_name" in image_context:
# NOTE(rvasilets) check string is "exactly equal to" a regex
# or image name from context equal to image name from args
if "regex" in image_args:
match = re.match(image_args.get("regex"), image_ctx_name)
if image_ctx_name == image_args.get("name") or ("regex"
in image_args
and match):
image = {
"size": image_context.get("min_disk", 0),
"min_ram": image_context.get("min_ram", 0),
"min_disk": image_context.get("min_disk", 0)
}
return (ValidationResult(True), image)
try:
image_id = openstack_types.GlanceImage.transform(
clients=clients, resource_config=image_args)
image = clients.glance().images.get(image_id)
if hasattr(image, "to_dict"):
# NOTE(stpierre): Glance v1 images are objects that can be
# converted to dicts; Glance v2 images are already
# dict-like
image = image.to_dict()
if not image.get("size"):
image["size"] = 0
if not image.get("min_ram"):
image["min_ram"] = 0
if not image.get("min_disk"):
image["min_disk"] = 0
return (ValidationResult(True), image)
except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument):
message = ("Image '%s' not found") % image_args
return (ValidationResult(False, message), None)
def validate(self, config, credentials, plugin_cls, plugin_cfg):
flavor = None
for user in credentials["openstack"]["users"]:
clients = user["credential"].clients()
if not flavor:
valid_result, flavor = self._get_validated_flavor(
config, clients, self.param_name)
if not valid_result.is_valid:
return valid_result
valid_result, image = self._get_validated_image(
config, clients, self.image_name)
if not image and not self.fail_on_404_image:
return
if not valid_result.is_valid:
return valid_result
if flavor.ram < image["min_ram"]:
message = ("The memory size for flavor '%s' is too small "
"for requested image '%s'") % (flavor.id,
image["id"])
return self.fail(message)
if flavor.disk and self.validate_disk:
if image["size"] > flavor.disk * (1024 ** 3):
message = ("The disk size for flavor '%s' is too small "
"for requested image '%s'") % (flavor.id,
image["id"])
return self.fail(message)
if image["min_disk"] > flavor.disk:
message = ("The minimal disk size for flavor '%s' is "
"too small for requested "
"image '%s'") % (flavor.id, image["id"])
return self.fail(message)
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="required_clients", platform="openstack")
class RequiredClientsValidator(validation.Validator):
def __init__(self, components, *args, **kwargs):
"""Validator checks if specified OpenStack clients are available.
:param components: list of client components names
:param **kwargs: optional parameters:
admin - bool, whether to use admin clients
"""
super(RequiredClientsValidator, self).__init__()
if isinstance(components, (list, tuple)):
            # the components argument is a list, so the new-style interface is
            # being used; positional args should not be provided in this case
self.components = components
if args:
LOG.warning("Positional argument is not what "
"'required_clients' decorator expects. "
"Use `components` argument instead")
else:
            # old-style (positional) usage of the validator
self.components = [components]
self.components.extend(args)
self.options = kwargs
def _check_component(self, clients):
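        # Returns a failed ValidationResult for the first client whose library
        # cannot be imported; implicitly returns None when every requested
        # component is importable, which is what validate() below relies on
        # when it checks `if result:`.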
for client_component in self.components:
try:
getattr(clients, client_component)()
except ImportError:
msg = ("Client for {0} is not installed. To install it run "
"`pip install python-{0}client`").format(
client_component)
return validation.ValidationResult(False, msg)
def validate(self, config, credentials, plugin_cls, plugin_cfg):
LOG.warning("The validator 'required_clients' is deprecated since "
"Rally 0.10.0. If you are interested in it, please "
"contact Rally team via E-mail, IRC or Gitter (see "
"https://rally.readthedocs.io/en/latest/project_info"
"/index.html#where-can-i-discuss-and-propose-changes for "
"more details).")
if self.options.get("admin", False):
clients = credentials["openstack"]["admin"].clients()
result = self._check_component(clients)
else:
for user in credentials["openstack"]["users"]:
clients = user["credential"].clients()
result = self._check_component(clients)
if result:
return self.fail(result.msg)
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="required_services", platform="openstack")
class RequiredServicesValidator(validation.Validator):
def __init__(self, services, *args):
"""Validator checks if specified OpenStack services are available.
:param services: list with names of required services
"""
super(RequiredServicesValidator, self).__init__()
if isinstance(services, (list, tuple)):
            # the services argument is a list, so the new-style interface is
            # being used; positional args should not be provided in this case
self.services = services
if args:
LOG.warning("Positional argument is not what "
"'required_services' decorator expects. "
"Use `services` argument instead")
else:
            # old-style (positional) usage of the validator
self.services = [services]
self.services.extend(args)
def validate(self, config, credentials, plugin_cls, plugin_cfg):
creds = (credentials["openstack"].get("admin")
or credentials["openstack"]["users"][0]["credential"])
available_services = creds.clients().services().values()
if consts.Service.NOVA_NET in self.services:
LOG.warning("We are sorry, but Nova-network was deprecated for "
"a long time and latest novaclient doesn't support "
"it, so we too.")
for service in self.services:
# NOTE(andreykurilin): validator should ignore services configured
# via context(a proper validation should be in context)
service_config = config.get("context", {}).get(
"api_versions", {}).get(service, {})
if (service not in available_services and
not ("service_type" in service_config or
"service_name" in service_config)):
return self.fail(
("'{0}' service is not available. Hint: If '{0}' "
"service has non-default service_type, try to"
" setup it via 'api_versions'"
" context.").format(service))
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="validate_heat_template", platform="openstack")
class ValidateHeatTemplateValidator(validation.Validator):
def __init__(self, params, *args):
"""Validates heat template.
:param params: list of parameters to be validated.
"""
super(ValidateHeatTemplateValidator, self).__init__()
if isinstance(params, (list, tuple)):
            # the params argument is a list, so the new-style interface is
            # being used; positional args should not be provided in this case
self.params = params
if args:
LOG.warning("Positional argument is not what "
"'validate_heat_template' decorator expects. "
"Use `params` argument instead")
else:
            # old-style (positional) usage of the validator
self.params = [params]
self.params.extend(args)
def validate(self, config, credentials, plugin_cls, plugin_cfg):
for param_name in self.params:
template_path = config.get("args", {}).get(param_name)
if not template_path:
msg = ("Path to heat template is not specified. Its needed "
"for heat template validation. Please check the "
"content of `{}` scenario argument.")
return self.fail(msg.format(param_name))
template_path = os.path.expanduser(template_path)
if not os.path.exists(template_path):
msg = "No file found by the given path {}"
return self.fail(msg.format(template_path))
            with open(template_path, "r") as f:
                template = f.read()
                try:
                    for user in credentials["openstack"]["users"]:
                        clients = user["credential"].clients()
                        clients.heat().stacks.validate(template=template)
except Exception as e:
dct = {"path": template_path,
"msg": str(e)}
msg = ("Heat template validation failed on %(path)s. "
"Original error message: %(msg)s.") % dct
return self.fail(msg)
@validation.add("required_platform", platform="openstack", admin=True)
@validation.configure(name="required_cinder_services", platform="openstack")
class RequiredCinderServicesValidator(validation.Validator):
def __init__(self, services):
"""Validator checks that specified Cinder service is available.
        It uses the Cinder client with admin permissions to issue a
        'cinder service-list' call.
:param services: Cinder service name
"""
super(RequiredCinderServicesValidator, self).__init__()
self.services = services
def validate(self, config, credentials, plugin_cls, plugin_cfg):
clients = credentials["openstack"]["admin"].clients().cinder()
for service in clients.services.list():
if (service.binary == six.text_type(self.services)
and service.state == six.text_type("up")):
return
msg = ("%s service is not available") % self.services
return self.fail(msg)
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="required_api_versions", platform="openstack")
class RequiredAPIVersionsValidator(validation.Validator):
def __init__(self, component, versions):
"""Validator checks component API versions.
:param component: name of required component
:param versions: version of required component
"""
super(RequiredAPIVersionsValidator, self).__init__()
self.component = component
self.versions = versions
def validate(self, config, credentials, plugin_cls, plugin_cfg):
versions = [str(v) for v in self.versions]
versions_str = ", ".join(versions)
msg = ("Task was designed to be used with %(component)s "
"V%(version)s, but V%(found_version)s is "
"selected.")
for user in credentials["openstack"]["users"]:
clients = user["credential"].clients()
if self.component == "keystone":
if "2.0" not in versions and hasattr(
clients.keystone(), "tenants"):
return self.fail(msg % {"component": self.component,
"version": versions_str,
"found_version": "2.0"})
if "3" not in versions and hasattr(
clients.keystone(), "projects"):
return self.fail(msg % {"component": self.component,
"version": versions_str,
"found_version": "3"})
else:
used_version = config.get(
"context", {}).get(
"api_versions", {}).get(
self.component, {}).get(
"version", getattr(
clients, self.component).choose_version())
if not used_version:
return self.fail("Unable to determine the API version.")
if str(used_version) not in versions:
return self.fail(msg % {"component": self.component,
"version": versions_str,
"found_version": used_version})
@validation.add("required_platform", platform="openstack", users=True)
@validation.configure(name="volume_type_exists", platform="openstack")
class VolumeTypeExistsValidator(validation.Validator):
def __init__(self, param_name, nullable=True):
"""Returns validator for volume types.
        :param param_name: defines the scenario argument that holds the
                           volume type name to be checked for existence.
        :param nullable: defines whether the volume_type param may be
                         missing or empty.
"""
super(VolumeTypeExistsValidator, self).__init__()
self.param = param_name
self.nullable = nullable
def validate(self, config, credentials, plugin_cls, plugin_cfg):
volume_type = config.get("args", {}).get(self.param, False)
if not volume_type and self.nullable:
return
if volume_type:
for user in credentials["openstack"]["users"]:
clients = user["credential"].clients()
vt_names = [vt.name for vt in
clients.cinder().volume_types.list()]
volume_types_ctx = config.get(
"context", {}).get("volume_types", [])
if volume_type not in vt_names + volume_types_ctx:
msg = ("Specified volume type {} not found for user {}. "
"List of available types: {}")
return self.fail(msg.format(volume_type, user, vt_names))
else:
msg = ("The parameter '{}' is required and should not be empty.")
return self.fail(msg.format(self.param))
@validation.configure(name="workbook_contains_workflow", platform="openstack")
class WorkbookContainsWorkflowValidator(validation.Validator):
def __init__(self, param_name, workflow_name):
"""Validate that workflow exist in workbook when workflow is passed
:param param_name: parameter containing the workbook definition
:param workflow_name: parameter containing the workflow name
"""
super(WorkbookContainsWorkflowValidator, self).__init__()
self.param_name = param_name
self.workflow_name = workflow_name
def validate(self, config, credentials, plugin_cls, plugin_cfg):
wf_name = config.get("args", {}).get(self.param_name)
if wf_name:
wb_path = config.get("args", {}).get(self.param_name)
wb_path = os.path.expanduser(wb_path)
file_result = validators.ValidatorUtils._file_access_ok(
config.get("args", {}).get(self.param_name),
os.R_OK, self.param_name)
if not file_result.is_valid:
return file_result
with open(wb_path, "r") as wb_def:
wb_def = yaml.safe_load(wb_def)
if wf_name not in wb_def["workflows"]:
self.fail("workflow '{}' not found "
"in the definition '{}'".format(wf_name,
wb_def))
|
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import json
import os
import sys
import shlex
from cli.settings import Settings
from cli.appconfig import AppConfig
from cli.hooks import Hooks
from cli.utils import Utils
from cli.utils import printException, printErrorMsg
from cli.dockerutils import DockerUtils
from cli.docker_build import Docker
from termcolor import colored
from datetime import datetime
import contextlib
import urllib
@contextlib.contextmanager
def chdir(dirname):
    '''Context manager that changes the working directory and restores it on exit'''
curdir = os.getcwd()
try:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
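# Example (illustrative; path and function are placeholders):
#
#     with chdir('/tmp/some_checkout'):
#         do_work()   # cwd is restored automatically afterwards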
def describe():
return 'runs the docker build and optionally pushes it into the registry.'
class RogerBuild(object):
def __init__(self):
self.utils = Utils()
self.outcome = 1
self.registry = ""
self.tag_name = ""
def parse_args(self):
self.parser = argparse.ArgumentParser(prog='roger build', description=describe())
self.parser.add_argument('app_name', metavar='app_name',
help="application to build. Example: 'agora'.")
self.parser.add_argument('directory', metavar='directory',
help="App Repo will be checked out here, this is the working dir CLI will use."
"A temporary directory is created if no directory specified."
"Example: '/home/vagrant/work_dir'.")
self.parser.add_argument('tag_name', metavar='tag_name',
help="tag for the built image. Example: 'roger-collectd:0.20'.")
self.parser.add_argument('config_file', metavar='config_file',
help="configuration file to use. Example: 'content.json'.")
self.parser.add_argument('-v', '--verbose', help="verbose mode for debugging. Defaults to false.", action="store_true")
self.parser.add_argument('--push', '-p', help="Also push to registry. Defaults to false.", action="store_true")
self.parser.add_argument('--build-arg', action='append',
help='docker build-arg; Use flags multiple times to pass more than one arg')
self.parser.add_argument('-ns', '--disable-swaparoo', help="Disables swaparoo functionality", action="store_true")
return self.parser
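    # Example invocation (illustrative; the app, directory, tag and config
    # names below are placeholders taken from the help strings above):
    #
    #   roger build agora /home/vagrant/work_dir roger-collectd:0.20 \
    #       content.json --push --build-arg ENV=dev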
def main(self, settingObj, appObj, hooksObj, dockerUtilsObj, dockerObj, args):
print(colored("******Building the Docker image now******", "grey"))
try:
config_dir = settingObj.getConfigDir()
root = settingObj.getCliDir()
config = appObj.getConfig(config_dir, args.config_file)
hooksObj.config_file = args.config_file
roger_env = appObj.getRogerEnv(config_dir)
config_name = ""
if 'name' in config:
config_name = config['name']
common_repo = config.get('repo', '')
if not hasattr(args, "env"):
args.env = "dev"
data = appObj.getAppData(config_dir, args.config_file, args.app_name)
if not data:
raise ValueError("Application with name [{}] or data for it not found at {}/{}.".format(
args.app_name, config_dir, args.config_file))
repo = ''
if common_repo != '':
repo = data.get('repo', common_repo)
else:
repo = data.get('repo', args.app_name)
docker_build_args = {}
if 'build-args' in data:
if 'environment' in data['build-args']:
if args.env in data['build-args']['environment']:
docker_build_args = data['build-args']['environment'][args.env]
# read the build-args from commandline like docker does as well
# build-args defined on command line will override the ones from the config file, for the same keys
# so this update of dictionary has to be done after we have read build arg values from the config file
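            # e.g. `--build-arg FOO=bar --build-arg BAZ=qux` (illustrative keys)
            # becomes {"FOO": "bar", "BAZ": "qux"} and overrides same-named keys
            # read from the config file.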
if args.build_arg:
docker_build_args.update(dict(arg_key_val_str.split('=') for arg_key_val_str in args.build_arg))
projects = data.get('privateProjects', [])
# get/update target source(s)
file_exists = True
file_path = ''
cur_dir = ''
if "PWD" in os.environ:
cur_dir = os.environ.get('PWD')
            # This code makes too many assumptions: it starts from the current directory and then
            # tries to guess the file path instead of resolving it directly. https://seomoz.atlassian.net/browse/ROGER-2405
# dockerfile location possibilities
# 1. Path relative to the repo, we know repo path for cli is <checkout_dir>/<repo>
# 2. Absolute path
            # This path comes from the config file and is not passed on the command line, so we
            # should not prefix the current working directory when a relative path is given; avoid guessing.
            # changelog : a path relative to the current directory won't work for working_directory or checkout_dir
            # changelog : working_directory or checkout_dir must be an absolute path (not backward-compatible)
checkout_dir = os.path.abspath(args.directory)
repo_name = appObj.getRepoName(repo)
# (vmahedia) todo : this should be called docker_file_dir
dockerfile_rel_repo_path = data.get('path', '')
file_path = os.path.join(checkout_dir, repo_name, dockerfile_rel_repo_path)
if not hasattr(args, "app_name"):
args.app_name = ""
if not hasattr(self, "identifier"):
self.identifier = self.utils.get_identifier(config_name, settingObj.getUser(), args.app_name)
args.app_name = self.utils.extract_app_name(args.app_name)
hookname = "pre_build"
exit_code = hooksObj.run_hook(hookname, data, file_path, args.env, settingObj.getUser())
if exit_code != 0:
raise ValueError("{} hook failed.".format(hookname))
build_filename = 'Dockerfile'
if 'build_filename' in data:
build_filename = ("{0}/{1}".format(file_path, data['build_filename']))
file_exists = os.path.exists(build_filename)
if not file_exists:
raise ValueError("Specified build file: {} does not exist. Exiting build.".format(build_filename))
else:
file_exists = os.path.exists("{0}/Dockerfile".format(file_path))
if file_exists:
                # (vmahedia) todo: We know which parameters the build command requires, so we should
                # not wait until this point to bail out. The config parser should have a validator
                # for every command that checks whether all required parameters were passed, instead
                # of doing all of this work only to fail here. Required parameter below: "registry".
if 'registry' not in roger_env:
raise ValueError("Registry not found in roger-mesos-tools.config file.")
else:
self.registry = roger_env['registry']
self.tag_name = args.tag_name
image = "{0}/{1}".format(roger_env['registry'], args.tag_name)
try:
if checkout_dir == args.directory:
try:
dockerObj.docker_build(
dockerUtilsObj, appObj, args.directory, repo, projects, dockerfile_rel_repo_path, image, docker_build_args, args.verbose, build_filename, args.disable_swaparoo)
except ValueError:
raise ValueError("Docker build failed")
else:
directory = os.path.join(cur_dir, args.directory)
try:
dockerObj.docker_build(
dockerUtilsObj, appObj, directory, repo, projects, dockerfile_rel_repo_path, image, docker_build_args, args.verbose, build_filename, args.disable_swaparoo)
except ValueError:
print('Docker build failed.')
raise
print(colored("******Successfully built Docker image******", "green"))
build_message = "Image [{}]".format(image)
                    if args.push:
print(colored("******Pushing Docker image to registry******", "grey"))
exit_code = dockerUtilsObj.docker_push(image, args.verbose)
if exit_code != 0:
raise ValueError(
'Docker push failed.')
build_message += " successfully pushed to registry [{}]*******".format(roger_env[
'registry'])
print(colored(build_message, "green"))
except (IOError) as e:
printException(e)
raise
else:
print(colored("Dockerfile does not exist in dir: {}".format(file_path), "red"))
hookname = "post_build"
exit_code = hooksObj.run_hook(hookname, data, file_path, args.env, settingObj.getUser())
if exit_code != 0:
raise ValueError('{} hook failed.'.format(hookname))
except (Exception) as e:
printException(e)
raise
finally:
# todo: maybe send a datadog event?
pass
if __name__ == "__main__":
settingObj = Settings()
appObj = AppConfig()
hooksObj = Hooks()
dockerUtilsObj = DockerUtils()
dockerObj = Docker()
roger_build = RogerBuild()
roger_build.parser = roger_build.parse_args()
args = roger_build.parser.parse_args()
try:
roger_build.main(settingObj, appObj, hooksObj, dockerUtilsObj, dockerObj, args)
except (Exception) as e:
printException(e)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Process Android resources to generate R.java, and prepare for packaging.
This will crunch images and generate v14 compatible resources
(see generate_v14_compatible_resources.py).
"""
import codecs
import optparse
import os
import re
import shutil
import sys
import zipfile
import generate_v14_compatible_resources
from util import build_utils
# Import jinja2 from third_party/jinja2
sys.path.insert(1,
os.path.join(os.path.dirname(__file__), '../../../third_party'))
from jinja2 import Template # pylint: disable=F0401
def ParseArgs(args):
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--android-sdk', help='path to the Android SDK folder')
parser.add_option('--aapt-path',
help='path to the Android aapt tool')
parser.add_option('--non-constant-id', action='store_true')
parser.add_option('--android-manifest', help='AndroidManifest.xml path')
parser.add_option('--custom-package', help='Java package for R.java')
parser.add_option(
'--shared-resources',
action='store_true',
      help='Make a resource package that can be loaded by a different '
'application at runtime to access the package\'s resources.')
parser.add_option('--resource-dirs',
help='Directories containing resources of this target.')
parser.add_option('--dependencies-res-zips',
help='Resources from dependents.')
parser.add_option('--resource-zip-out',
help='Path for output zipped resources.')
parser.add_option('--R-dir',
help='directory to hold generated R.java.')
parser.add_option('--srcjar-out',
help='Path to srcjar to contain generated R.java.')
parser.add_option('--r-text-out',
                    help='Path to store the R.txt file generated by aapt.')
parser.add_option('--proguard-file',
help='Path to proguard.txt generated file')
parser.add_option(
'--v14-skip',
action="store_true",
help='Do not generate nor verify v14 resources')
parser.add_option(
'--extra-res-packages',
help='Additional package names to generate R.java files for')
parser.add_option(
'--extra-r-text-files',
help='For each additional package, the R.txt file should contain a '
'list of resources to be included in the R.java file in the format '
'generated by aapt')
parser.add_option(
'--include-all-resources',
action='store_true',
help='Include every resource ID in every generated R.java file '
'(ignoring R.txt).')
parser.add_option(
'--all-resources-zip-out',
help='Path for output of all resources. This includes resources in '
'dependencies.')
parser.add_option('--stamp', help='File to touch on success')
(options, args) = parser.parse_args(args)
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = (
'android_sdk',
'aapt_path',
'android_manifest',
'dependencies_res_zips',
'resource_dirs',
'resource_zip_out',
)
build_utils.CheckOptions(options, parser, required=required_options)
if (options.R_dir is None) == (options.srcjar_out is None):
raise Exception('Exactly one of --R-dir or --srcjar-out must be specified.')
return options
def CreateExtraRJavaFiles(
r_dir, extra_packages, extra_r_text_files, shared_resources, include_all):
if include_all:
java_files = build_utils.FindInDirectory(r_dir, "R.java")
if len(java_files) != 1:
return
r_java_file = java_files[0]
r_java_contents = codecs.open(r_java_file, encoding='utf-8').read()
for package in extra_packages:
package_r_java_dir = os.path.join(r_dir, *package.split('.'))
build_utils.MakeDirectory(package_r_java_dir)
package_r_java_path = os.path.join(package_r_java_dir, 'R.java')
new_r_java = re.sub(r'package [.\w]*;', u'package %s;' % package,
r_java_contents)
codecs.open(package_r_java_path, 'w', encoding='utf-8').write(new_r_java)
else:
if len(extra_packages) != len(extra_r_text_files):
raise Exception('Need one R.txt file per extra package')
all_resources = {}
r_txt_file = os.path.join(r_dir, 'R.txt')
if not os.path.exists(r_txt_file):
return
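    # Each R.txt line is expected to look like (values illustrative):
    #   int drawable icon 0x7f020001
    #   int[] styleable FooView { 0x7f010000, 0x7f010001 }
    # i.e. "<java type> <resource type> <name> <value>", matching the regex below.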
with open(r_txt_file) as f:
for line in f:
m = re.match(r'(int(?:\[\])?) (\w+) (\w+) (.+)$', line)
if not m:
raise Exception('Unexpected line in R.txt: %s' % line)
java_type, resource_type, name, value = m.groups()
all_resources[(resource_type, name)] = (java_type, value)
for package, r_text_file in zip(extra_packages, extra_r_text_files):
if os.path.exists(r_text_file):
package_r_java_dir = os.path.join(r_dir, *package.split('.'))
build_utils.MakeDirectory(package_r_java_dir)
package_r_java_path = os.path.join(package_r_java_dir, 'R.java')
CreateExtraRJavaFile(
package, package_r_java_path, r_text_file, all_resources,
shared_resources)
def CreateExtraRJavaFile(
package, r_java_path, r_text_file, all_resources, shared_resources):
resources = {}
with open(r_text_file) as f:
for line in f:
m = re.match(r'int(?:\[\])? (\w+) (\w+) ', line)
if not m:
raise Exception('Unexpected line in R.txt: %s' % line)
resource_type, name = m.groups()
java_type, value = all_resources[(resource_type, name)]
if resource_type not in resources:
resources[resource_type] = []
resources[resource_type].append((name, java_type, value))
template = Template("""/* AUTO-GENERATED FILE. DO NOT MODIFY. */
package {{ package }};
public final class R {
{% for resource_type in resources %}
public static final class {{ resource_type }} {
{% for name, java_type, value in resources[resource_type] %}
{% if shared_resources %}
public static {{ java_type }} {{ name }} = {{ value }};
{% else %}
public static final {{ java_type }} {{ name }} = {{ value }};
{% endif %}
{% endfor %}
}
{% endfor %}
{% if shared_resources %}
public static void onResourcesLoaded(int packageId) {
{% for resource_type in resources %}
{% for name, java_type, value in resources[resource_type] %}
{% if java_type == 'int[]' %}
for(int i = 0; i < {{ resource_type }}.{{ name }}.length; ++i) {
{{ resource_type }}.{{ name }}[i] =
({{ resource_type }}.{{ name }}[i] & 0x00ffffff)
| (packageId << 24);
}
{% else %}
{{ resource_type }}.{{ name }} =
({{ resource_type }}.{{ name }} & 0x00ffffff)
| (packageId << 24);
{% endif %}
{% endfor %}
{% endfor %}
}
{% endif %}
}
""", trim_blocks=True, lstrip_blocks=True)
output = template.render(package=package, resources=resources,
shared_resources=shared_resources)
with open(r_java_path, 'w') as f:
f.write(output)
def CrunchDirectory(aapt, input_dir, output_dir):
"""Crunches the images in input_dir and its subdirectories into output_dir.
If an image is already optimized, crunching often increases image size. In
this case, the crunched image is overwritten with the original image.
"""
aapt_cmd = [aapt,
'crunch',
'-C', output_dir,
'-S', input_dir,
'--ignore-assets', build_utils.AAPT_IGNORE_PATTERN]
build_utils.CheckOutput(aapt_cmd, stderr_filter=FilterCrunchStderr,
fail_func=DidCrunchFail)
# Check for images whose size increased during crunching and replace them
# with their originals (except for 9-patches, which must be crunched).
for dir_, _, files in os.walk(output_dir):
for crunched in files:
if crunched.endswith('.9.png'):
continue
if not crunched.endswith('.png'):
raise Exception('Unexpected file in crunched dir: ' + crunched)
crunched = os.path.join(dir_, crunched)
original = os.path.join(input_dir, os.path.relpath(crunched, output_dir))
original_size = os.path.getsize(original)
crunched_size = os.path.getsize(crunched)
if original_size < crunched_size:
shutil.copyfile(original, crunched)
def FilterCrunchStderr(stderr):
"""Filters out lines from aapt crunch's stderr that can safely be ignored."""
filtered_lines = []
for line in stderr.splitlines(True):
# Ignore this libpng warning, which is a known non-error condition.
# http://crbug.com/364355
if ('libpng warning: iCCP: Not recognizing known sRGB profile that has '
+ 'been edited' in line):
continue
filtered_lines.append(line)
return ''.join(filtered_lines)
def DidCrunchFail(returncode, stderr):
"""Determines whether aapt crunch failed from its return code and output.
Because aapt's return code cannot be trusted, any output to stderr is
an indication that aapt has failed (http://crbug.com/314885).
"""
return returncode != 0 or stderr
def ZipResources(resource_dirs, zip_path):
# Python zipfile does not provide a way to replace a file (it just writes
# another file with the same name). So, first collect all the files to put
# in the zip (with proper overriding), and then zip them.
files_to_zip = dict()
for d in resource_dirs:
for root, _, files in os.walk(d):
for f in files:
archive_path = os.path.join(os.path.relpath(root, d), f)
path = os.path.join(root, f)
files_to_zip[archive_path] = path
with zipfile.ZipFile(zip_path, 'w') as outzip:
for archive_path, path in files_to_zip.iteritems():
outzip.write(path, archive_path)
def CombineZips(zip_files, output_path):
# When packaging resources, if the top-level directories in the zip file are
# of the form 0, 1, ..., then each subdirectory will be passed to aapt as a
# resources directory. While some resources just clobber others (image files,
# etc), other resources (particularly .xml files) need to be more
# intelligently merged. That merging is left up to aapt.
with zipfile.ZipFile(output_path, 'w') as outzip:
for i, z in enumerate(zip_files):
with zipfile.ZipFile(z, 'r') as inzip:
for name in inzip.namelist():
new_name = '%d/%s' % (i, name)
outzip.writestr(new_name, inzip.read(name))
def main():
args = build_utils.ExpandFileArgs(sys.argv[1:])
options = ParseArgs(args)
android_jar = os.path.join(options.android_sdk, 'android.jar')
aapt = options.aapt_path
input_files = []
with build_utils.TempDir() as temp_dir:
deps_dir = os.path.join(temp_dir, 'deps')
build_utils.MakeDirectory(deps_dir)
v14_dir = os.path.join(temp_dir, 'v14')
build_utils.MakeDirectory(v14_dir)
gen_dir = os.path.join(temp_dir, 'gen')
build_utils.MakeDirectory(gen_dir)
input_resource_dirs = build_utils.ParseGypList(options.resource_dirs)
if not options.v14_skip:
for resource_dir in input_resource_dirs:
generate_v14_compatible_resources.GenerateV14Resources(
resource_dir,
v14_dir)
dep_zips = build_utils.ParseGypList(options.dependencies_res_zips)
input_files += dep_zips
dep_subdirs = []
for z in dep_zips:
subdir = os.path.join(deps_dir, os.path.basename(z))
if os.path.exists(subdir):
raise Exception('Resource zip name conflict: ' + os.path.basename(z))
build_utils.ExtractAll(z, path=subdir)
dep_subdirs.append(subdir)
# Generate R.java. This R.java contains non-final constants and is used only
# while compiling the library jar (e.g. chromium_content.jar). When building
# an apk, a new R.java file with the correct resource -> ID mappings will be
# generated by merging the resources from all libraries and the main apk
# project.
package_command = [aapt,
'package',
'-m',
'-M', options.android_manifest,
'--auto-add-overlay',
'-I', android_jar,
'--output-text-symbols', gen_dir,
'-J', gen_dir,
'--ignore-assets', build_utils.AAPT_IGNORE_PATTERN]
for d in input_resource_dirs:
package_command += ['-S', d]
for d in dep_subdirs:
package_command += ['-S', d]
if options.non_constant_id:
package_command.append('--non-constant-id')
if options.custom_package:
package_command += ['--custom-package', options.custom_package]
if options.proguard_file:
package_command += ['-G', options.proguard_file]
if options.shared_resources:
package_command.append('--shared-lib')
build_utils.CheckOutput(package_command, print_stderr=False)
if options.extra_res_packages:
CreateExtraRJavaFiles(
gen_dir,
build_utils.ParseGypList(options.extra_res_packages),
build_utils.ParseGypList(options.extra_r_text_files),
options.shared_resources,
options.include_all_resources)
# This is the list of directories with resources to put in the final .zip
# file. The order of these is important so that crunched/v14 resources
# override the normal ones.
zip_resource_dirs = input_resource_dirs + [v14_dir]
base_crunch_dir = os.path.join(temp_dir, 'crunch')
# Crunch image resources. This shrinks png files and is necessary for
# 9-patch images to display correctly. 'aapt crunch' accepts only a single
# directory at a time and deletes everything in the output directory.
for idx, input_dir in enumerate(input_resource_dirs):
crunch_dir = os.path.join(base_crunch_dir, str(idx))
build_utils.MakeDirectory(crunch_dir)
zip_resource_dirs.append(crunch_dir)
CrunchDirectory(aapt, input_dir, crunch_dir)
ZipResources(zip_resource_dirs, options.resource_zip_out)
if options.all_resources_zip_out:
CombineZips([options.resource_zip_out] + dep_zips,
options.all_resources_zip_out)
if options.R_dir:
build_utils.DeleteDirectory(options.R_dir)
shutil.copytree(gen_dir, options.R_dir)
else:
build_utils.ZipDir(options.srcjar_out, gen_dir)
if options.r_text_out:
r_text_path = os.path.join(gen_dir, 'R.txt')
if os.path.exists(r_text_path):
shutil.copyfile(r_text_path, options.r_text_out)
else:
open(options.r_text_out, 'w').close()
if options.depfile:
input_files += build_utils.GetPythonDependencies()
build_utils.WriteDepfile(options.depfile, input_files)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import pickle
import platform
import random
import unittest
import pyasmjit
from barf.arch import ARCH_X86_MODE_64
from barf.arch.x86.x86base import X86ArchitectureInformation
from barf.arch.x86.x86parser import X86Parser
from barf.arch.x86.x86translator import FULL_TRANSLATION
from barf.arch.x86.x86translator import X86Translator
from barf.core.reil import ReilContainer
from barf.core.reil import ReilEmulator
from barf.core.reil import ReilSequence
@unittest.skipUnless(platform.machine().lower() == 'x86_64',
'Not running on an x86_64 system')
class X86TranslationTests(unittest.TestCase):
def setUp(self):
self.arch_mode = ARCH_X86_MODE_64
self.arch_info = X86ArchitectureInformation(self.arch_mode)
self.x86_parser = X86Parser(self.arch_mode)
self.x86_translator = X86Translator(self.arch_mode, FULL_TRANSLATION)
self.reil_emulator = ReilEmulator(self.arch_info)
self.context_filename = "failing_context.data"
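    # Each test below follows the same pattern: the assembly snippet is run
    # natively (via pyasmjit) and through the x86 -> REIL translation plus the
    # REIL emulator, starting from the same initial register context, and the
    # two resulting contexts are compared. On a mismatch the initial context is
    # saved (presumably pickled to self.context_filename) so the failing case
    # can be replayed. The name-mangled helpers (__init_context, __run_code,
    # __compare_contexts, __save_failing_context, __print_contexts,
    # __fix_reil_flag) are assumed to be defined further down in this module.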
def test_lea(self):
asm = ["lea eax, [ebx + 0x100]"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cld(self):
asm = ["cld"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_clc(self):
asm = ["clc"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_nop(self):
asm = ["nop"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_test(self):
asm = ["test eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_not(self):
asm = ["not eax"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_xor(self):
asm = ["xor eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_or(self):
asm = ["or eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_and(self):
asm = ["and eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmp(self):
asm = ["cmp eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_neg(self):
asm = ["neg eax"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_dec(self):
asm = ["dec eax"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_inc(self):
asm = ["inc eax"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_div_1(self):
asm = ["div ebx"]
ctx_init = {
'rax' : 0x10,
'rbx' : 0x2,
'rdx' : 0x0,
'rflags' : 0x202,
}
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "sf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "zf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "pf")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_div_2(self):
asm = ["div ebx"]
ctx_init = {
'rax' : 0xFFFFFFFFFFFFFFFF,
'rbx' : 0x2,
'rdx' : 0x0000000000000000,
'rflags' : 0x202,
}
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "sf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "zf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "pf")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_div_3(self):
asm = ["div ebx"]
ctx_init = {
'rax' : 0xFFFFFFFFFFFFFFFF,
'rbx' : 0x2,
'rdx' : 0x0000000000000001,
'rflags' : 0x202,
}
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "sf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "zf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "pf")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_div_4(self):
asm = ["div ebx"]
ctx_init = {
'rax' : 0xFFFFFFFFFFFFFFFF,
'rbx' : 0x4,
'rdx' : 0x0000000000000002,
'rflags' : 0x202,
}
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "sf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "zf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "pf")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
# # TODO: Uncomment once imul translation gets fixed.
# def test_imul(self):
# asm = ["imul eax, ebx"]
# ctx_init = self.__init_context()
# x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# # Undefined flags...
# reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "sf")
# reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "zf")
# reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
# reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "pf")
# cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self.__save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_mul(self):
asm = ["mul ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "sf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "zf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "pf")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_sbb(self):
asm = ["sbb eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# FIX: Remove this once the sbb translation gets fixed.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_sub(self):
asm = ["sub eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_adc(self):
asm = ["adc eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_add(self):
asm = ["add eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_xchg(self):
asm = ["xchg eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_movzx(self):
asm = ["movzx eax, bx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_mov(self):
asm = ["mov eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmova(self):
asm = ["cmova eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovae(self):
asm = ["cmovae eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovb(self):
asm = ["cmovb eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovbe(self):
asm = ["cmovbe eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovc(self):
asm = ["cmovc eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmove(self):
asm = ["cmove eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovg(self):
asm = ["cmovg eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovge(self):
asm = ["cmovge eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovl(self):
asm = ["cmovl eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovle(self):
asm = ["cmovle eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovna(self):
asm = ["cmovna eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnae(self):
asm = ["cmovnae eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnb(self):
asm = ["cmovnb eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnbe(self):
asm = ["cmovnbe eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnc(self):
asm = ["cmovnc eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovne(self):
asm = ["cmovne eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovng(self):
asm = ["cmovng eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnge(self):
asm = ["cmovnge eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnl(self):
asm = ["cmovnl eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnle(self):
asm = ["cmovnle eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovno(self):
asm = ["cmovno eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnp(self):
asm = ["cmovnp eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovns(self):
asm = ["cmovns eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovnz(self):
asm = ["cmovnz eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovo(self):
asm = ["cmovo eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovp(self):
asm = ["cmovp eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovpe(self):
asm = ["cmovpe eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovpo(self):
asm = ["cmovpo eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovs(self):
asm = ["cmovs eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmovz(self):
asm = ["cmovz eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_seta(self):
asm = ["seta al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setae(self):
asm = ["setae al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setb(self):
asm = ["setb al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setbe(self):
asm = ["setbe al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setc(self):
asm = ["setc al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_sete(self):
asm = ["sete al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setg(self):
asm = ["setg al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setge(self):
asm = ["setge al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setl(self):
asm = ["setl al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setle(self):
asm = ["setle al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setna(self):
asm = ["setna al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnae(self):
asm = ["setnae al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnb(self):
asm = ["setnb al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnbe(self):
asm = ["setnbe al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnc(self):
asm = ["setnc al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setne(self):
asm = ["setne al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setng(self):
asm = ["setng al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnge(self):
asm = ["setnge al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnl(self):
asm = ["setnl al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnle(self):
asm = ["setnle al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setno(self):
asm = ["setno al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnp(self):
asm = ["setnp al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setns(self):
asm = ["setns al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setnz(self):
asm = ["setnz al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_seto(self):
asm = ["seto al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setp(self):
asm = ["setp al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setpe(self):
asm = ["setpe al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setpo(self):
asm = ["setpo al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_sets(self):
asm = ["sets al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_setz(self):
asm = ["setz al"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_all_jcc(self):
conds = [
'a', 'ae', 'b', 'be', 'c', 'e', 'g', 'ge', 'l', 'le', 'na', 'nae',
'nb', 'nbe', 'nc', 'ne', 'ng', 'nge', 'nl', 'nle', 'no', 'np', 'ns',
'nz', 'o', 'p', 'pe', 'po', 's', 'z'
]
for c in conds:
self.__test_jcc(c)
def __test_jcc(self, jmp_cond):
untouched_value = 0x45454545
touched_value = 0x31313131
asm = [
"mov rax, 0x{:x}".format(untouched_value),
"j" + jmp_cond + " {:s}",
"mov rax, 0x{:x}".format(touched_value),
"xchg rax, rax",
]
asm_reil = list(asm)
asm_reil[1] = asm_reil[1].format(str(0xdeadbeef + 0x3))
asm_pyasmjit = list(asm)
asm_pyasmjit[1] = asm_pyasmjit[1].format("$+0x07")
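        # The REIL copy of the code jumps to an absolute address: __set_address()
        # assigns each instruction a size of 1, so 0xdeadbeef + 0x3 is the final
        # "xchg rax, rax", skipping the second mov. The PyAsmJIT copy uses a
        # native-relative target ("$+0x07") that skips the same mov in the
        # actual x86 encoding.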
reil_instrs = self.__asm_to_reil(asm_reil, 0xdeadbeef)
ctx_init = self.__init_context()
_, x86_ctx_out = pyasmjit.x86_execute("\n".join(asm_pyasmjit), ctx_init)
reil_ctx_out, _ = self.reil_emulator.execute(
reil_instrs,
start=0xdeadbeef << 8,
registers=ctx_init
)
reil_ctx_out = self.__fix_reil_flags(reil_ctx_out, x86_ctx_out)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_shr(self):
asm = ["shr eax, 3"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_shl(self):
asm = ["shl eax, 3"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_sal(self):
asm = ["sal eax, 3"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_sar(self):
asm = ["sar eax, 3"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# Undefined flags...
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_stc(self):
asm = ["stc"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_rol(self):
asm = ["rol eax, 8"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_ror(self):
asm = ["ror eax, 8"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_rcl(self):
asm = ["rcl eax, 8"]
ctx_init = self.__init_context()
# set carry flag
ctx_init['rflags'] = ctx_init['rflags'] | 0x1
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_rcr(self):
asm = ["rcr eax, 3"]
ctx_init = self.__init_context()
# set carry flag
ctx_init['rflags'] = ctx_init['rflags'] | 0x1
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
        # NOTE: OF and CF can be left undefined in some cases; they are
        # not covered by this test.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "cf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_bt(self):
asm = ["bt eax, ebx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
# NOTE: The OF, SF, AF, and PF flags are undefined.
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "of")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "sf")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "af")
reil_ctx_out = self.__fix_reil_flag(reil_ctx_out, x86_ctx_out, "pf")
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_cmpxchg(self):
asm = ["cmpxchg ebx, ecx"]
ctx_init = self.__init_context()
_, x86_ctx_out = pyasmjit.x86_execute("\n".join(asm), ctx_init)
reil_ctx_out, _ = self.reil_emulator.execute(
self.__asm_to_reil(asm, 0xdeadbeef),
start=0xdeadbeef << 8,
end=(0xdeadbeef + 0x1) << 8,
registers=ctx_init
)
reil_ctx_out = self.__fix_reil_flags(reil_ctx_out, x86_ctx_out)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
def test_movsx(self):
asm = ["movsx eax, bx"]
ctx_init = self.__init_context()
x86_ctx_out, reil_ctx_out = self.__run_code(asm, 0xdeadbeef, ctx_init)
cmp_result = self.__compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
if not cmp_result:
self.__save_failing_context(ctx_init)
self.assertTrue(cmp_result, self.__print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
# Auxiliary methods
# ======================================================================== #
def __init_context(self):
"""Initialize register with random values.
"""
if os.path.isfile(self.context_filename):
context = self.__load_failing_context()
else:
context = self.__create_random_context()
return context
def __create_random_context(self):
context = {}
for reg in self.arch_info.registers_gp_base:
if reg not in ['rsp', 'rip', 'rbp']:
min_value, max_value = 0, 2**self.arch_info.operand_size - 1
context[reg] = random.randint(min_value, max_value)
context['rflags'] = self.__create_random_flags()
return context
def __create_random_flags(self):
# TODO: Check why PyAsmJIT throws an exception when DF flag is
# set.
flags_mapper = {
0 : "cf", # bit 0
2 : "pf", # bit 2
4 : "af", # bit 4
6 : "zf", # bit 6
7 : "sf", # bit 7
# 10 : "df", # bit 10
11 : "of", # bit 11
}
# Set 'mandatory' flags.
flags = 0x202
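        # 0x202 sets bit 1 (reserved, always 1 in EFLAGS) and bit 9 (IF);
        # the status flags listed above are then randomized.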
for bit, _ in flags_mapper.items():
flags = flags | (2**bit * random.randint(0, 1))
return flags
def __load_failing_context(self):
f = open(self.context_filename, "rb")
context = pickle.load(f)
f.close()
return context
def __save_failing_context(self, context):
f = open(self.context_filename, "wb")
pickle.dump(context, f)
f.close()
def __compare_contexts(self, context_init, x86_context, reil_context):
match = True
mask = 2**64-1
for reg in sorted(context_init.keys()):
if (x86_context[reg] & mask) != (reil_context[reg] & mask):
match = False
break
return match
def __print_contexts(self, context_init, x86_context, reil_context):
out = "Contexts don't match!\n\n"
header_fmt = " {0:^8s} : {1:^16s} | {2:>16s} ?= {3:<16s}\n"
header = header_fmt.format("Register", "Initial", "x86", "REIL")
ruler = "-" * len(header) + "\n"
out += header
out += ruler
fmt = " {0:>8s} : {1:016x} | {2:016x} {eq} {3:016x} {marker}\n"
mask = 2**64-1
for reg in sorted(context_init.keys()):
if (x86_context[reg] & mask) != (reil_context[reg] & mask):
eq, marker = "!=", "<"
else:
eq, marker = "==", ""
out += fmt.format(
reg,
context_init[reg] & mask,
x86_context[reg] & mask,
reil_context[reg] & mask,
eq=eq,
marker=marker
)
# Pretty print flags.
reg = "rflags"
fmt = "{0:s} ({1:>7s}) : {2:016x} ({3:s})"
init_value = context_init[reg] & mask
x86_value = x86_context[reg] & mask
reil_value = reil_context[reg] & mask
init_flags_str = self.__print_flags(context_init[reg])
x86_flags_str = self.__print_flags(x86_context[reg])
reil_flags_str = self.__print_flags(reil_context[reg])
out += "\n"
out += fmt.format(reg, "initial", init_value, init_flags_str) + "\n"
out += fmt.format(reg, "x86", x86_value, x86_flags_str) + "\n"
out += fmt.format(reg, "reil", reil_value, reil_flags_str)
return out
def __print_registers(self, registers):
out = ""
header_fmt = " {0:^8s} : {1:^16s}\n"
header = header_fmt.format("Register", "Value")
ruler = "-" * len(header) + "\n"
out += header
out += ruler
fmt = " {0:>8s} : {1:016x}\n"
for reg in sorted(registers.keys()):
out += fmt.format(reg, registers[reg])
print(out)
def __print_flags(self, flags):
        # EFLAGS bit positions of the status flags to display
flags_mapper = {
0 : "cf", # bit 0
2 : "pf", # bit 2
4 : "af", # bit 4
6 : "zf", # bit 6
7 : "sf", # bit 7
10 : "df", # bit 10
11 : "of", # bit 11
}
out = ""
for bit, flag in flags_mapper.items():
flag_str = flag.upper() if flags & 2**bit else flag.lower()
out += flag_str + " "
return out[:-1]
def __fix_reil_flag(self, reil_context, x86_context, flag):
reil_context_out = dict(reil_context)
flags_reg = 'eflags' if 'eflags' in reil_context_out else 'rflags'
_, bit = self.arch_info.alias_mapper[flag]
# Clean flag.
reil_context_out[flags_reg] &= ~(2**bit) & (2**32-1)
# Copy flag.
reil_context_out[flags_reg] |= (x86_context[flags_reg] & 2**bit)
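        # e.g. for "cf" (bit 0): bit 0 is cleared in the REIL flags register and
        # the native CF value is copied in, so flags the translator leaves
        # undefined do not cause spurious context mismatches.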
return reil_context_out
def __fix_reil_flags(self, reil_context, x86_context):
reil_context_out = dict(reil_context)
# Remove this when AF and PF are implemented.
reil_context_out = self.__fix_reil_flag(reil_context_out, x86_context, "af")
reil_context_out = self.__fix_reil_flag(reil_context_out, x86_context, "pf")
return reil_context_out
def __set_address(self, address, x86_instrs):
addr = address
for x86_instr in x86_instrs:
x86_instr.address = addr
x86_instr.size = 1
addr += 1
def __translate(self, asm_instrs):
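        # Translates each x86 instruction into a ReilSequence and chains the
        # sequences via next_sequence_address so the emulator can fall through
        # from one translated instruction to the next.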
instr_container = ReilContainer()
asm_instr_last = None
instr_seq_prev = None
for asm_instr in asm_instrs:
instr_seq = ReilSequence()
for reil_instr in self.x86_translator.translate(asm_instr):
instr_seq.append(reil_instr)
if instr_seq_prev:
instr_seq_prev.next_sequence_address = instr_seq.address
instr_container.add(instr_seq)
instr_seq_prev = instr_seq
if instr_seq_prev:
if asm_instr_last:
instr_seq_prev.next_sequence_address = (asm_instr_last.address + asm_instr_last.size) << 8
return instr_container
def __asm_to_reil(self, asm_list, address):
x86_instrs = [self.x86_parser.parse(asm) for asm in asm_list]
self.__set_address(address, x86_instrs)
reil_instrs = self.__translate(x86_instrs)
return reil_instrs
def __run_code(self, asm_list, address, ctx_init):
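        # Runs the same assembly twice -- natively via PyAsmJIT and through the
        # REIL emulator -- starting from the same register context, and returns
        # both output contexts for comparison (AF and PF are copied from the
        # native run until they are implemented in the translator).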
reil_instrs = self.__asm_to_reil(asm_list, address)
_, x86_ctx_out = pyasmjit.x86_execute("\n".join(asm_list), ctx_init)
reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=0xdeadbeef << 8, registers=ctx_init)
# Fix AF and PF.
reil_ctx_out = self.__fix_reil_flags(reil_ctx_out, x86_ctx_out)
return x86_ctx_out, reil_ctx_out
def main():
unittest.main()
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_rmtree
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
def shared_artifacts(version, extra_jar=None):
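  # Maps the hello-greet artifact directory for `version` to the ivy/jar/pom/sources
  # files every publish of that project is expected to produce, e.g. for '0.0.1-SNAPSHOT':
  # {'org/pantsbuild/testproject/publish/hello-greet/0.0.1-SNAPSHOT': ['ivy-0.0.1-SNAPSHOT.xml', ...]}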
published_file_list = ['ivy-{0}.xml'.format(version),
'hello-greet-{0}.jar'.format(version),
'hello-greet-{0}.pom'.format(version),
'hello-greet-{0}-sources.jar'.format(version)]
if extra_jar:
published_file_list.append(extra_jar)
return {'org/pantsbuild/testproject/publish/hello-greet/{0}'.format(version): published_file_list}
# TODO: Right now some options are set via config and some via cmd-line flags. Normalize this?
def publish_extra_config(unique_config):
return {
'DEFAULT': {
'pythonpath': [
'examples/src/python',
'pants-plugins/src/python',
],
'backend_packages': [
'example.pants_publish_plugin',
'internal_backend.repositories',
'pants.backend.android', # There are android target source roots defined in examples/BUILD
],
},
'publish.jar': {
'publish_extras': {
'extra_test_jar_example': unique_config,
},
},
}
class JarPublishIntegrationTest(PantsRunIntegrationTest):
GOLDEN_DATA_DIR = 'tests/python/pants_test/tasks/jar_publish_resources/'
# This is where all pushdb properties files will end up.
@property
def pushdb_root(self):
return os.path.join(get_buildroot(), 'testprojects', 'ivy', 'pushdb')
def setUp(self):
# This attribute is required to see the full diff between ivy and pom files.
self.maxDiff = None
safe_rmtree(self.pushdb_root)
def tearDown(self):
safe_rmtree(self.pushdb_root)
def test_scala_publish(self):
unique_artifacts = {'org/pantsbuild/testproject/publish/jvm-example-lib_2.10/0.0.1-SNAPSHOT':
['ivy-0.0.1-SNAPSHOT.xml',
'jvm-example-lib_2.10-0.0.1-SNAPSHOT.jar',
'jvm-example-lib_2.10-0.0.1-SNAPSHOT.pom',
'jvm-example-lib_2.10-0.0.1-SNAPSHOT-sources.jar'],
'org/pantsbuild/testproject/publish/hello/welcome_2.10/0.0.1-SNAPSHOT':
['ivy-0.0.1-SNAPSHOT.xml',
'welcome_2.10-0.0.1-SNAPSHOT.jar',
'welcome_2.10-0.0.1-SNAPSHOT.pom',
'welcome_2.10-0.0.1-SNAPSHOT-sources.jar']}
self.publish_test('testprojects/src/scala/org/pantsbuild/testproject/publish'
':jvm-run-example-lib',
dict(unique_artifacts.items() + shared_artifacts('0.0.1-SNAPSHOT').items()),
['org.pantsbuild.testproject.publish/hello-greet/publish.properties',
'org.pantsbuild.testproject.publish/jvm-example-lib_2.10/publish.properties',
'org.pantsbuild.testproject.publish.hello/welcome_2.10/publish.properties'],
extra_options=['--doc-scaladoc-skip'],
expected_primary_artifact_count=3,
assert_publish_config_contents=True)
def test_java_publish(self):
self.publish_test('testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet',
shared_artifacts('0.0.1-SNAPSHOT'),
['org.pantsbuild.testproject.publish/hello-greet/publish.properties'],)
def test_protobuf_publish(self):
unique_artifacts = {'org/pantsbuild/testproject/publish/protobuf/protobuf-java/0.0.1-SNAPSHOT':
['ivy-0.0.1-SNAPSHOT.xml',
'protobuf-java-0.0.1-SNAPSHOT.jar',
'protobuf-java-0.0.1-SNAPSHOT.pom',
'protobuf-java-0.0.1-SNAPSHOT-sources.jar'],
'org/pantsbuild/testproject/protobuf/distance/0.0.1-SNAPSHOT/':
['ivy-0.0.1-SNAPSHOT.xml',
'distance-0.0.1-SNAPSHOT.jar',
'distance-0.0.1-SNAPSHOT.pom',
'distance-0.0.1-SNAPSHOT-sources.jar']}
self.publish_test('testprojects/src/java/org/pantsbuild/testproject/publish/protobuf'
':protobuf-java',
unique_artifacts,
['org.pantsbuild.testproject.publish.protobuf/protobuf-java/'
'publish.properties',
'org.pantsbuild.testproject.protobuf/distance/publish.properties'],
extra_options=['--doc-javadoc-skip'],
expected_primary_artifact_count=2)
def test_named_snapshot(self):
name = "abcdef0123456789"
self.publish_test('testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet',
shared_artifacts(name),
['org.pantsbuild.testproject.publish/hello-greet/publish.properties'],
extra_options=['--named-snapshot={}'.format(name)])
def test_publish_override_flag_succeeds(self):
override = "com.twitter.foo#baz=0.1.0"
self.publish_test('testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet',
shared_artifacts('0.0.1-SNAPSHOT'),
['org.pantsbuild.testproject.publish/hello-greet/publish.properties'],
extra_options=['--override={}'.format(override)])
# Collect all the common factors for running a publish_extras test, and execute the test.
def publish_extras_runner(self, extra_config=None, artifact_name=None, success_expected=True):
self.publish_test('testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet',
shared_artifacts('0.0.1-SNAPSHOT', artifact_name),
['org.pantsbuild.testproject.publish/hello-greet/publish.properties'],
extra_options=['--doc-javadoc-skip'],
extra_config=extra_config,
success_expected=success_expected)
#
# Run through all the permutations of the config parameters for publish_extras.
#
def test_publish_extras_name_classifier(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'override_name': '{target_provides_name}-extra_example',
'classifier': 'classy',
}),
artifact_name='hello-greet-extra_example-0.0.1-SNAPSHOT-classy.jar')
def test_publish_extras_name(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'override_name': '{target_provides_name}-extra_example',
}),
artifact_name='hello-greet-extra_example-0.0.1-SNAPSHOT.jar')
def test_publish_extras_name_extension(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'override_name': '{target_provides_name}-extra_example',
'extension': 'zip'
}),
artifact_name='hello-greet-extra_example-0.0.1-SNAPSHOT.zip')
def test_publish_extras_extension(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'extension': 'zip'
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT.zip')
def test_publish_extras_extension_classifier(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'classifier': 'classy',
'extension': 'zip'
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT-classy.zip')
def test_publish_extras_classifier(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'classifier': 'classy',
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT-classy.jar')
# This test doesn't specify a proper set of parameters that uniquely name the extra artifact, and
# should fail with an error from pants.
def test_publish_extras_invalid_args(self):
self.publish_extras_runner(extra_config=publish_extra_config({
'extension': 'jar',
}),
artifact_name='hello-greet-0.0.1-SNAPSHOT.jar',
success_expected=False)
def test_scala_publish_classifiers(self):
self.publish_test('testprojects/src/scala/org/pantsbuild/testproject/publish/classifiers',
dict({
'org/pantsbuild/testproject/publish/classifiers_2.10/0.0.1-SNAPSHOT': [
'classifiers_2.10-0.0.1-SNAPSHOT.pom',
'ivy-0.0.1-SNAPSHOT.xml',
]}),
[],
assert_publish_config_contents=True)
def test_override_via_coord(self):
self.publish_test(
target='testprojects/src/scala/org/pantsbuild/testproject/publish/classifiers',
artifacts=dict({'org/pantsbuild/testproject/publish/classifiers_2.10/1.2.3-SNAPSHOT': [
'classifiers_2.10-1.2.3-SNAPSHOT.pom',
'ivy-1.2.3-SNAPSHOT.xml',
]}),
pushdb_files=[],
extra_options=['--override=org.pantsbuild.testproject.publish#classifiers_2.10=1.2.3'],
assert_publish_config_contents=True)
def test_override_via_address(self):
target = 'testprojects/src/scala/org/pantsbuild/testproject/publish/classifiers'
self.publish_test(
target=target,
artifacts=dict({'org/pantsbuild/testproject/publish/classifiers_2.10/1.2.3-SNAPSHOT': [
'classifiers_2.10-1.2.3-SNAPSHOT.pom',
'ivy-1.2.3-SNAPSHOT.xml',
]}),
pushdb_files=[],
extra_options=['--override={}=1.2.3'.format(target)],
assert_publish_config_contents=True)
def publish_test(self, target, artifacts, pushdb_files, extra_options=None, extra_config=None,
extra_env=None, expected_primary_artifact_count=1, success_expected=True,
assert_publish_config_contents=False):
"""Tests that publishing the given target results in the expected output.
:param target: Target to test.
:param artifacts: A map from directories to a list of expected filenames.
:param pushdb_files: list of pushdb files that would be created if this weren't a local publish
:param extra_options: Extra command-line options to the pants run.
:param extra_config: Extra pants.ini configuration for the pants run.
:param expected_primary_artifact_count: Number of artifacts we expect to be published.
:param extra_env: Extra environment variables for the pants run.
:param assert_publish_config_contents: Test the contents of the generated ivy and pom file.
If set to True, compares the generated ivy.xml and pom files in
tests/python/pants_test/tasks/jar_publish_resources/<package_name>/<artifact_name>/
"""
with temporary_dir() as publish_dir:
options = ['--local={}'.format(publish_dir),
'--no-dryrun',
'--force']
if extra_options:
options.extend(extra_options)
      yes = 'y' * expected_primary_artifact_count
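      # The publish task asks for confirmation interactively; feed one 'y'
      # answer on stdin per expected primary artifact.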
pants_run = self.run_pants(['publish.jar'] + options + [target], config=extra_config,
stdin_data=yes, extra_env=extra_env)
if success_expected:
self.assert_success(pants_run, "'pants goal publish' expected success, but failed instead.")
else:
self.assert_failure(pants_run,
"'pants goal publish' expected failure, but succeeded instead.")
return
# New pushdb directory should be created for all artifacts.
for pushdb_file in pushdb_files:
pushdb_dir = os.path.dirname(os.path.join(self.pushdb_root, pushdb_file))
self.assertTrue(os.path.exists(pushdb_dir))
# But because we are doing local publishes, no pushdb files are created
for pushdb_file in pushdb_files:
self.assertFalse(os.path.exists(os.path.join(self.pushdb_root, pushdb_file)))
for directory, artifact_list in artifacts.items():
for artifact in artifact_list:
artifact_path = os.path.join(publish_dir, directory, artifact)
self.assertTrue(os.path.exists(artifact_path))
if assert_publish_config_contents:
if artifact.endswith('xml') or artifact.endswith('pom'):
self.compare_file_contents(artifact_path, directory)
def compare_file_contents(self, artifact_path, directory):
"""
    Compares the generated ivy.xml and pom files against their golden copies.
:param artifact_path: Path of the artifact
:param directory: Directory where the artifact resides.
:return:
"""
# Strip away the version number
[package_dir, artifact_name, version] = directory.rsplit(os.path.sep, 2)
file_name = os.path.basename(artifact_path)
golden_file_nm = os.path.join(JarPublishIntegrationTest.GOLDEN_DATA_DIR,
package_dir.replace(os.path.sep, '.'), artifact_name, file_name)
with open(artifact_path, 'r') as test_file:
generated_file = test_file.read()
with open(golden_file_nm, 'r') as golden_file:
golden_file_contents = golden_file.read()
# Remove the publication sha attribute from ivy.xml
if artifact_path.endswith('.xml'):
generated_file = re.sub(r'publication=.*', '/>', generated_file)
return self.assertMultiLineEqual(generated_file, golden_file_contents)
|
|
"""
this file does variant calling for DNAseq
"""
#============= import required packages =================
import os
import sys,subprocess
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # disable buffering so that all log output is written in order.
from f00_Message import Message
from f01_list_trim_fq import list_files_human,Trimmomatic
from f02_aligner_command import bwa_vari
from f03_samtools import sam2bam_sort
from f07_picard import markduplicates
from f08_GATK import *
from p01_FileProcess import remove,get_parameters,rg_bams
#============= define some parameters ===================
"""these parameters and read group names are different for
different samples, should only change this part for
running pipeline
"""
parFile = sys.argv[1]
param = get_parameters(parFile)
thread = param['thread']
email = param['email']
startMessage = param['startMessage']
endMessage = param['endMessage']
ref_fa = param['refSequence']
file_path = param['filePath']
bwaDb = param['alignerDb']
trim = param['trim']
phred = param['phred']
picard = param['picard']
trimmomatic = param['trimmomatic']
trimmoAdapter = param['trimmoAdapter']
gold_snp = param['dbSNP']
phaseINDEL= param['phase1INDEL']
gold_indel= param['MillINDEL']
omni = param['omni']
hapmap = param['hapMap']
gatk = param['gatk']
read_group = param['readGroup']
organism = param['organism']
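# Pipeline outline: (optional) trimming -> bwa alignment -> sort -> mark duplicates ->
# indel realignment -> base quality recalibration -> variant calling. Multi-lane samples
# are merged, re-deduplicated and realigned before joint genotyping + VQSR; a single
# recalibrated BAM goes through HaplotypeCaller + hard filtering instead.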
##***************** Part 0. Build index file for bwa and GATK ******
##================= Part I. Preprocess ============================
#======== 1. map and dedupping =====================================
#======== (0) enter the directory ========================
os.chdir(file_path)
Message(startMessage,email)
#======== (1) read files ================================
fastqFiles = list_files_human(file_path)
if trim == 'True':
fastqFiles = Trimmomatic(trimmomatic,fastqFiles,phred,trimmoAdapter)
print 'list file succeed'
print 'fastqFiles is: ',fastqFiles
#======== (2) define group ===============================
#defined above
#======== (3) align using bwa ============================
try:
map_sam = bwa_vari(read_group,fastqFiles,bwaDb,thread)
print 'align succeed'
print 'map_sam is: ',map_sam
except:
print 'align failed'
Message('align failed',email)
sys.exit(1)
#======== (4) Convert sam to sorted bam ==================
try:
sort_bams = sam2bam_sort(map_sam,thread)
print 'sort bam files succeed'
print 'sort_bams is: ',sort_bams
except:
print 'sort bam files failed'
Message('sort bam files failed',email)
sys.exit(1)
#======== (5) Markduplicates using picard ================
try:
dedup_files = markduplicates(picard,sort_bams)
print 'mark duplicates succeed'
print 'dedup_files is: ',dedup_files
remove(sort_bams)
except:
print 'mark duplicates failed'
Message('mark duplicates failed',email)
sys.exit(1)
#======== 2. Indel realignment ====================================
#======== (6) Create a target list of intervals===========
try:
interval = RealignerTargetCreator(gatk,dedup_files,ref_fa,thread,phaseINDEL,gold_indel)
print 'RealignerTarget Creator succeed'
print 'interval is: ',interval
except:
print 'RealignerTarget Creator failed'
Message('RealignerTarget Creator failed',email)
sys.exit(1)
#======== (7) realignment of target intervals ============
try:
realign_bams = IndelRealigner(gatk,dedup_files,ref_fa,interval,phaseINDEL,gold_indel)
    print 'IndelRealigner succeed'
print 'realign_bams is: ',realign_bams
remove(dedup_files)
except:
print 'IndelRealigner failed'
Message('IndelRealigner failed',email)
sys.exit(1)
#======== 3. Base quality recalibration =================
roundNum = '1'
try:
recal_bam_files = BaseRecalibrator(gatk,realign_bams,ref_fa,gold_snp,
gold_indel,roundNum,thread)
print 'round 1 recalibration succeed'
print 'recal_bam_files is: ',recal_bam_files
except:
print 'round 1 recalibration failed'
Message('round 1 recalibration failed',email)
sys.exit(1)
##================= Part II. Variant Calling ======================
#======== 1. call raw variant using HaplotypeCaller =====
#======== (1) determine parameters ======================
#======== (2) call variant ==============================
#======== !!! merge lanes for the same sample ============
if len(recal_bam_files) !=1:
#========= (3) merge samples =========================
try:
merged_bams = rg_bams(read_group,recal_bam_files)
print 'merged succeed'
print 'merged_bams is: ',merged_bams
remove(recal_bam_files)
except:
print 'merged failed'
Message('merged failed',email)
sys.exit(1)
#========= (4) mark duplicates ========================
try:
dedup_files = markduplicates(picard,merged_bams)
print 'dedup succeed'
print 'merged dedup_files is: ',dedup_files
remove(merged_bams)
except:
print 'merged dedup failed'
Message('merged dedup failed',email)
sys.exit(1)
#========= (5) Realignment ============================
try:
interval = RealignerTargetCreator(gatk,dedup_files,ref_fa,
thread,phaseINDEL,gold_indel)
realign_bams = IndelRealigner(gatk,dedup_files,ref_fa,
interval,phaseINDEL,gold_indel)
print 'merged indelrealigner succeed'
print 'merged realign_bams is: ',realign_bams
remove(dedup_files)
except:
print 'merged realign failed'
Message('merged realign failed',email)
sys.exit(1)
#========= (6) call variant ==============================
try:
raw_gvcf_files = HaplotypeCaller_DNA_gVCF(gatk,realign_bams,ref_fa,thread)
print 'merged final call succeed'
print 'raw_gvcf_files is:',raw_gvcf_files
except:
print 'final call failed'
Message('final call failed',email)
sys.exit(1)
#======== (7) Joint Genotyping ==========================
try:
joint_gvcf_file = JointGenotype(gatk,raw_gvcf_files,ref_fa,organism,thread)
print 'final joint succeed'
print 'joint_gvcf_file is: ',joint_gvcf_file
except:
print 'final joint failed'
Message('final joint failed',email)
sys.exit(1)
#======== (8) VQSR ======================================
try:
recal_variant = VQSR_human(gatk,joint_gvcf_file,ref_fa,thread,hapmap,omni,phaseINDEL,gold_snp,gold_indel)
print 'vcf recalibration succeed'
print 'recal_variant is: ',recal_variant
except:
print 'final vcf recalibration failed'
Message('final vcf recalibration failed',email)
sys.exit(1)
else:
    # for a single recalibrated BAM file, call variants on it directly
#======== Calling variant =================================
try:
raw_vcf_file = HaplotypeCaller_DNA_VCF(gatk,recal_bam_files[0],ref_fa,thread)
print 'final call succeed'
        print 'raw_vcf_file is: ',raw_vcf_file
except:
print 'final call failed'
Message('final call failed',email)
sys.exit(1)
#======== Hard filtering ==================================
try:
final_filtered_files = HardFilter(gatk,raw_vcf_file,ref_fa,thread)
print 'final filter succeed'
print 'final_filtered_files is: ',final_filtered_files
except:
print 'final filter failed'
Message('final filter failed',email)
sys.exit(1)
#======== Combine snp and indel ===========================
try:
combinedVcf = CombineSNPandINDEL(gatk,ref_fa,final_filtered_files,'--assumeIdenticalSamples --genotypemergeoption UNSORTED')
print 'combine snp and indel succeed'
        print 'combinedVcf file is: ',combinedVcf
except:
print 'combine snp and indel failed'
sys.exit(1)
Message(endMessage,email)
|
|
import pymongo
import socket
import json
# import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import re
from collections import defaultdict, deque
import math
import numpy as np
from matplotlib.font_manager import FontProperties
hardcoded_designs = (
"1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@MEO1 1.Sat@MEO4 2.Sat@MEO5 1.Sat@LEO1 2.Sat@LEO2",
"1.GroundSta@SUR1 2.GroundSta@SUR4 2.Sat@GEO4 1.Sat@MEO1 1.Sat@MEO4 2.Sat@MEO5 1.Sat@LEO1 2.Sat@LEO2",
"1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 1.Sat@MEO2 2.Sat@MEO3 2.Sat@MEO5 3.Sat@MEO6",
"1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 3.Sat@GEO5 1.Sat@MEO1 1.Sat@MEO2 2.Sat@MEO3 2.Sat@MEO5 3.Sat@MEO6",
# "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 2.Sat@MEO2 3.Sat@MEO5 1.Sat@LEO2 2.Sat@LEO4 3.Sat@LEO6",
# "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@GEO1 1.Sat@MEO1 2.Sat@MEO4 3.Sat@MEO5 2.Sat@LEO4 3.Sat@LEO6",
"1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 2.Sat@MEO2 3.Sat@MEO3 1.Sat@LEO1 2.Sat@LEO2 3.Sat@LEO3",
)
design_dict = {d: i for i,d in enumerate(hardcoded_designs)}
xtickDict = {0: 'I', 1: 'II', 2: 'III', 3: 'IV', 4: 'V', 5: 'VI', 6: 'VII', 7: 'VIII', 8: 'IX', 9: 'X'}
xticklist = ['Design %s' % xtickDict[i] for i in range(len(hardcoded_designs))]
divider = 1000000
def filterInvalidDics(dictlist):
return [dic for dic in dictlist if len(set(dic['costDictList'])) <= 2]
def findBalancedMembers(dictlist):
return [dic for dic in dictlist if len(set(dic['costDictList'])) == 1]
def fops2costs(fops):
costSGL, storagePenalty, auctioneer = re.search('x([-\d]+),([-\.\d]+),([-\d]+)', fops).groups()
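    # e.g. 'x600,400.00,-1' -> costSGL='600', storagePenalty='400.00', auctioneer='-1'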
# print(costSGL, costISL, storagePenalty)
return (int(costSGL), float(storagePenalty), int(auctioneer))
def fopsGen(des, test):
# print("test:",test)
numPlayers = 2
if '3.' in des:
numPlayers = 3
if 'regular storage' in test:
if 'stochastic' in test or 'random' in test:
costsgl = [-3]
else:
costsgl = [600]
storage = [-1, 400, 800]
for sgl in costsgl:
for stor in storage:
fopslist = numPlayers*['x%d,%1.2f,%d'%(sgl, stor, -1)]
yield fopslist
# print("design:", des, numPlayers)
elif 'storage' in test.lower():
if 'stochastic' in test.lower():
yield numPlayers * ["x%d,%1.2f,%d" % (-3, 400, -1)]
yield numPlayers * ["x%d,%1.2f,%d" % (-3, 800, -1)]
for k in np.linspace(0., 1.99, 19):
yield numPlayers * ["x%d,%1.2f,%d" % (-3, -1*k, -1)]
else:
yield numPlayers * ["x%d,%1.2f,%d" % (600, 400, -1)]
yield numPlayers * ["x%d,%1.2f,%d" % (600, 800, -1)]
for k in np.linspace(0., 1.99, 19):
yield numPlayers * ["x%d,%1.2f,%d" % (600, -1*k, -1)]
elif 'federate adaptive' in test:
costrange = [-3, 0, 1200, 600]
for sgl in costrange:
fops_adaptive = json.dumps(['x%d,%d,%d' % (-2, -1, -1)] + (numPlayers-1) * ['x%d,%d,%d' % (sgl, -1, -1)])
fops = json.dumps(numPlayers * ['x%d,%d,%d' % (sgl, -1, -1)])
yield (fops, fops_adaptive)
elif 'total' in test:
costrange = [0, 600, 1200]
# print(costrange)
for sgl in costrange:
# print(sgl)
if numPlayers == 2:
fops_1 = ['x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_2 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_3 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1)]
print("new")
yield (fops_1, fops_2, fops_3)
elif numPlayers == 3:
fops_1 = ['x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_2 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_3 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_4 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1)]
yield (fops_1, fops_2, fops_3, fops_4)
elif 'auctioneer' in test:
fops_1 = numPlayers * ['x%d,%d,%d' % (-2, -1, -1)]
fops_2 = numPlayers * ['x%d,%d,%d' % (-2, -1, 1)]
yield (fops_1, fops_2)
def fopsGenTotal(des):
numPlayers = 2
if '3.' in des:
numPlayers = 3
costrange = [0, 600, 1200]
for sgl in costrange:
if numPlayers == 2:
fops_1 = ['x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_2 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_3 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1)]
yield (fops_1, fops_2, fops_3)
elif numPlayers == 3:
fops_1 = ['x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_2 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_3 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (sgl, -1, -1)]
fops_4 = ['x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1), 'x%d,%d,%d' % (-2, -1, -1)]
yield (fops_1, fops_2, fops_3, fops_4)
def convertLocation2xy(location):
if 'SUR' in location:
r = 0.5
elif 'LEO' in location:
r = 1.
elif 'MEO' in location:
r = 1.5
elif "GEO" in location:
r = 2
else:
r = 2.35
sect = int(re.search(r'.+(\d)', location).group(1))
tetha = +math.pi / 3 - (sect - 1) * math.pi / 3
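    # e.g. 'MEO3' -> r = 1.5, sector 3, tetha = pi/3 - 2*pi/3 = -pi/3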
x, y = (r * math.cos(tetha), r * math.sin(tetha))
# print location, x, y
return (x, y)
def createPoints(letters, x, y, xdelta, ydelta, k):
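    # Spreads one marker point per letter (e.g. 'AAN') around the design's (x, y)
    # column, nudging the whole group according to how many federates are adaptive
    # ('A') vs. non-adaptive ('N'); returns {letter: [(x, y), ...]}.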
# print(letters)
letterdict = defaultdict(list)
d = 0.35
k = (1.+k)/2.
xdelta = k*xdelta
ydelta = k*ydelta/1.5
if len(letters) == 2:
if letters == 'NN':
delta = -d
elif letters == 'AN':
delta = 0
elif letters == 'AA':
delta = d
xlist = [delta + x-xdelta/2., delta + x + xdelta/2.]
ylist = 2*[y]
for l, x, y in zip(letters, xlist, ylist):
letterdict[l].append((x,y))
elif len(letters) == 3:
if letters == 'NNN':
delta = -d
elif letters == 'ANN':
delta = -d/3.
elif letters == 'AAN':
delta = d/3.
elif letters == 'AAA':
delta = d
xlist = [delta + x - xdelta/2., delta + x, delta + x + xdelta/2.]
ylist = [y - ydelta*0.2886, y + ydelta/2., y - ydelta*0.2886]
# ylist = 3*[y]
for l, x, y in zip(letters, xlist, ylist):
letterdict[l].append((x,y))
return letterdict
def drawTotalAdaptive(query):
global hardcoded_designs
# print(hardcoded_designs)
totalcash_dict = {}
divider = 1000000.
ydelta_dict = {0: 0.58575022518545405, 600: 0.9811286044285239, 1200: 2.111500681313383}
my_dpi = 150
fig = plt.figure(figsize=(800 / my_dpi, 800 / my_dpi), dpi=my_dpi)
ax = fig.add_axes([0.1, 0.1, 0.9, 0.9])
k = 0.5
all_legends = []
letter_dict = defaultdict(set)
cost_color_dict = {600: 'b', 1200: 'm', 0: 'r'}
cost_marker_dict = {600: 'H', 1200: '*', 0: '^'}
totalcash_tuple_dict = defaultdict(list)
color_dict = defaultdict(str)
markerdict = defaultdict(str)
order_dict = {'AA-AAA': 1, 'AAN': 2, 'AN-ANN': 3, 'NN-NNN': 4}
design_point_dict = defaultdict(set)
base_letter_dict = {(0, "A"): 'A', (600, "A"): "A", (1200, "A"): 'A', (0, "N"): 'Z', (600, "N"): "S", (1200, "N"): 'O'}
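    # Adaptive federates ('A') share one marker regardless of base cost, while
    # non-adaptive federates ('N') get a distinct marker per base cost
    # ('Z'/'S'/'O' stand in for costs 0/600/1200 in letter_marker_dict below).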
letter_marker_dict = {'A': ('s', 'k', 'Federate Cost: Adaptive'),
'Z': (cost_marker_dict[0], cost_color_dict[0], 'Federate Cost: %d' % 0),
'S': (cost_marker_dict[600], cost_color_dict[600], 'Federate Cost: %d' % 600),
'O': (cost_marker_dict[1200], cost_color_dict[1200], 'Federate Cost: %d' % 1200)
}
for basecost in [0, 600]:
all_points = []
for i, des in enumerate(hardcoded_designs):
numPlayers = 2
if '3.' in des:
numPlayers = 3
query['elementlist'] = des
# print(query)
# for sgl in costrange:
# test = 'total adaptive'
allfops = list(fopsGenTotal(des))
if numPlayers == 2:
allfops = list(fopsGenTotal(des))
for tup in allfops:
# print(fops, fops_A6, fops_AA)
sgl = int(re.search(r'x([-\d]+).+', tup[0][0]).group(1))
if sgl != basecost:
continue
names = ['D%d_%s%s' % (i, 'N', 'N'), 'D%d_A%s' % (i, 'N'), 'D%d_AA' % i]
for fops, name in zip(tup, names):
query['fops'] = json.dumps(fops)
# print(query)
docs = list(db.results.find(query))[0]
results = json.loads(docs['results'])
totalcash = sum([e[1] for e in results])
# print(fops, results, totalcash)
totalcash_dict[name] = totalcash / divider
elif numPlayers == 3:
for tup in allfops:
# fops = tup[0]
sgl = int(re.search(r'x([-\d]+).+', tup[0][0]).group(1))
if sgl != basecost:
continue
# fops_list = [fops, fops_A66, fops_AA6, fops_AAA]
# names = ['D%d_%d%d%d'%(i, sgl//100, sgl//100, sgl//100), 'D%d_A%d%d'%(i, sgl//100, sgl//100), 'D%d_AA%d'%(i, sgl//100), 'D%d_AAA'%i]
names = ['D%d_%s%s%s' % (i, 'N', 'N', 'N'), 'D%d_A%s%s' % (i, 'N', 'N'), 'D%d_AA%s' % (i, 'N'),
'D%d_AAA' % i]
for fops, name in zip(tup, names):
query['fops'] = json.dumps(fops)
# print(query)
docs = list(db.results.find(query))[0]
results = json.loads(docs['results'])
totalcash = sum([e[1] for e in results])
# print(fops, results, totalcash)
totalcash_dict[name] = totalcash / divider
# print(totalcash_dict)
# totalcash_tuple_dict = defaultdict(list)
# color_dict = defaultdict(str)
# markerdict = defaultdict(str)
# order_dict = {'AA-AAA': 1, 'AAN': 2, 'AN-ANN': 3, 'NN-NNN': 4}
# plt.figure()
design_point_dict = defaultdict(list)
for D, y in totalcash_dict.items():
# print(D,y)
x = int(re.search(r'D(\d+)_(.+)', D).group(1)) + 1
label = re.search(r'D(\d+)_(.+)', D).group(2)
# print(label)
# plt.annotate(label, xy=(x, y), xycoords='data', textcoords='offset points')
# label_dict = ({'AA': r'$\clubsuit \clubsuit$', 'AN': r'$\clubsuit \blacksquare$',
# 'NN':r'$\blacksquare \blacksquare$', 'AAA': r'$\clubsuit \clubsuit \clubsuit$',
# 'ANN': r'$\clubsuit \blacksquare \blacksquare$', 'NNN':r'$\blacksquare \blacksquare \blacksquare$', 'AAN': r'$\clubsuit \clubsuit \blacksquare$'})
# label2 = label_dict[label]
# print(label2)
# plt.text(x, y, ha="center", va="center", s = label2)
xdelta = 0.14
tempdict = createPoints(label, x, y, xdelta, xdelta*2.2, k)#ydelta_dict[basecost])
values = [e for l in tempdict.values() for e in l]
avgx = sum([e[0] for e in values]) / len(values)
avgy = sum([e[1] for e in values]) / len(values)
all_points.append((avgx, avgy))
design_point_dict[x].append((round(avgx,2), round(avgy,2)))
print(x, avgx, avgy)
for l in tempdict:
# print(l)
l2 = base_letter_dict[(basecost, l)]
letter_dict[l2] = letter_dict[l2].union(set(tempdict[l]))
if label.count('A') == 0:
lab = 'NN-NNN'
color_dict[lab] = 'b'
markerdict[lab] = '*'
totalcash_tuple_dict[lab].append((x, y))
elif label.count('A') == len(label):
lab = 'AA-AAA'
color_dict[lab] = 'k'
markerdict[lab] = 's'
totalcash_tuple_dict[lab].append((x, y))
elif label.count('A') >= 2:
lab = 'AAN'
color_dict[lab] = 'g'
markerdict[lab] = '^'
totalcash_tuple_dict[lab].append((x, y))
else:
lab = 'AN-ANN'
color_dict[lab] = 'r'
markerdict[lab] = 'o'
totalcash_tuple_dict[lab].append((x, y))
# plt.scatter(x,y, color = color)
legends = []
# for label, points in sorted(totalcash_tuple_dict.items(), key = lambda x: order_dict[x[0]]):
# legends.append(label)
# plt.scatter(*zip(*points), color = color_dict[label], marker = markerdict[label], s = 40)
#
# plt.legend(legends, frameon=False,ncol=4, loc='upper center', bbox_to_anchor=(0.5, 1.15), labelspacing=2)
# for letter, points in letter_dict.items():
# marker, color, legend = letter_marker_dict[letter]
# plt.scatter(*zip(*points), marker=marker, color=color, s=k*30, linewidths='2')
# legends.append(legend)
#
# plt.legend(legends)
# for i in range(len(hardcoded_designs) - 1):
# plt.axvline(i + 1.5, color='k', linestyle=':', linewidth=0.6)
#
for d, points in design_point_dict.items():
print(points)
plt.plot(*zip(*points), 'k--', zorder = -3)
plt.scatter(*zip(*all_points), marker='o', s=k * 400, facecolors='w', edgecolors='k', zorder=-2,
                linewidths=1)
# plt.xlim(0.5, len(hardcoded_designs) + 0.5)
# # print("x lim and y lim:", ax.get_ylim(), ax.get_xlim())
# xdelta = ax.get_xlim()[1] - ax.get_xlim()[0]
# ydelta = ax.get_ylim()[1] - ax.get_ylim()[0]
# # ydelta_dict[basecost] = ydelta/xdelta
# plt.ylabel('total cash (M$)')
# # plt.title('cost functions: $N = %d, A= adaptive$'%basecost)
# xtickDict = {0: 'I', 1: 'II', 2: 'III', 3: 'IV', 4: 'V', 5: 'VI', 6: 'VII', 7: 'VIII', 8: 'XI', 9: 'X'}
# xticklist = ['Design %s' % xtickDict[i] for i in range(len(hardcoded_designs))]
# plt.xticks(range(1, len(hardcoded_designs) + 1), xticklist, rotation=0)
# plt.savefig("Total_revenue_CostFunctions_Default%s.pdf" % str(basecost).zfill(4), bbox_inches='tight')
legends = []
lines = []
for letter, points in letter_dict.items():
marker, color, legend = letter_marker_dict[letter]
        newline = plt.scatter(*zip(*points), marker=marker, color=color, s=k * 30, linewidths=2)
lines.append(newline)
legends.append(legend)
plt.legend(lines, legends)
for i in range(len(hardcoded_designs) - 1):
plt.axvline(i + 1.5, color='k', linestyle=':', linewidth=0.6)
plt.xlim(0.5, len(hardcoded_designs) + 0.5)
# print("x lim and y lim:", ax.get_ylim(), ax.get_xlim())
xdelta = ax.get_xlim()[1] - ax.get_xlim()[0]
ydelta = ax.get_ylim()[1] - ax.get_ylim()[0]
# ydelta_dict[basecost] = ydelta/xdelta
plt.ylabel('total cash (M$)')
# plt.title('cost functions: $N = %d, A= adaptive$'%basecost)
    xtickDict = {0: 'I', 1: 'II', 2: 'III', 3: 'IV', 4: 'V', 5: 'VI', 6: 'VII', 7: 'VIII', 8: 'IX', 9: 'X'}
xticklist = ['Design %s' % xtickDict[i] for i in range(len(hardcoded_designs))]
plt.xticks(range(1, len(hardcoded_designs) + 1), xticklist, rotation=0)
plt.savefig("Total_revenue_CostFunctions_Default.pdf", bbox_inches='tight')
# print(ydelta_dict)
plt.show()
def drawStorage(docslist, design, query):
# fopslist = [d["fops"] for d in docslist]
# print(docslist)
storageset = sorted(list(set([d['storagePenalty'] for d in docslist])))
# storageset = [-1, 0, 100, 300, 500]
# storageset = [-2, -1]
# plt.figure()
storage_cashlist_dict = defaultdict(list)
for s in storageset:
# print("storage: ", s)
tempdocs = [d for d in docslist if int(d["storagePenalty"]) == s]
        costlist = [d['costSGL'] for d in tempdocs]
        resultlist = [json.loads(d["results"]) for d in tempdocs]
        cashlist = [sum([e[1] for e in r]) / 1000000. for r in resultlist]
        storage_cashlist_dict[s] = [e[1] for e in sorted(list(zip(costlist, cashlist)))]
storage_residual_dict = defaultdict(int)
baseline = storage_cashlist_dict[400]
# print("base line 400:", baseline)
# print(maxlist)
for s in storageset:
cashlist = storage_cashlist_dict[s]
# print(s, ' cash list:', cashlist)
residual = [100*(b-a)/a for a,b in zip(baseline, cashlist)]
# residual = [b for a,b in zip(baseline, cashlist)]
storage_residual_dict[s] = sum(residual)
# print(storage_residual_dict)
return storage_residual_dict
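# Reading of the calculation above (no new behaviour): the residual reported
# for each storage penalty s is the summed percentage improvement over the
# storagePenalty == 400 baseline, i.e. the sum over cost points of
# 100 * (cash_s - cash_400) / cash_400.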
def calResidual(docslist, design, query):
# fopslist = [d["fops"] for d in docslist]
print(docslist)
storageset = sorted(list(set([d['storagePenalty'] for d in docslist])))
# storageset = [-1, 0, 100, 300, 500]
# storageset = [-2, -1]
# plt.figure()
storage_cashlist_dict = defaultdict(list)
for s in storageset:
# print("storage: ", s)
tempdocs = [d for d in docslist if int(d["storagePenalty"]) == s]
print("length of tempdocs:", len(tempdocs))
        costlist = [d['costSGL'] for d in tempdocs]
        # resultlist = [json.loads(d["results"]) for d in tempdocs]
        cashlist = [d["cashlist"] for d in tempdocs]
        storage_cashlist_dict[s] = [e[1] for e in sorted(list(zip(costlist, cashlist)))]
print(s)
print(len(storage_cashlist_dict[s]))
print([len(a) for a in storage_cashlist_dict[s]])
storage_residual_dict = defaultdict(list)
baseline = [sum(a)/float(len(a)) for a in storage_cashlist_dict[400]]
# print("base line 400:", baseline)
# print(maxlist)
for s in storageset:
cashlist = storage_cashlist_dict[s]
# print(s, ' cash list:', cashlist)
residual = [[100*(b-a)/a for b in l] for a,l in zip(baseline, cashlist)]
# residual = [b for a,b in zip(baseline, cashlist)]
print("length of residual:", len(residual))
storage_residual_dict[s] = residual
# print(storage_residual_dict)
return storage_residual_dict
def runQuery(db, query, test):
global design_dict
residual_dict = defaultdict(list)
N = len(design_dict)
numseeds = 30
boxplot_dict = {}
storage_dict = {400: 0, 800: 1, -1: 2}
for des, i in sorted(design_dict.items(), key = lambda x: x[1]):
query['elementlist'] = des
query['numTurns'] = 240
templist = []
for fops in fopsGen(des, test):
query['fops'] = json.dumps(fops)
# print("query :", query['elementlist'])
docsresult = list(db.results.find(query))
sample = docsresult[0]
# print(len(docsresult), len([d['numTurns'] for d in docsresult]))
resultlist = sorted([(d['seed'], json.loads(d["results"])) for d in docsresult])
cashlist = [sum([e[1] for e in r[1]]) / 1000000. for r in resultlist]
row = {k: sample[k] for k in ['fops']}
row['cashlist'] = cashlist[:numseeds]
costsgl, storage, auctioneer = fops2costs(row['fops'])
if costsgl not in [600, -3]:
continue
row['costSGL'] = costsgl
row['costISL'] = costsgl
row['storagePenalty'] = storage
# print("row:", row)
x = i * 3 + storage_dict[storage]
print(i, des, sum(cashlist))
boxplot_dict[x] = cashlist
# templist.append(row)
# if list(db.results.find(query)):
# templist.append(list(db.results.find(query))[0])
# else:
# print(fops)
# termslist = [re.search(r"(x.+),([-\.\d]+),(.+)", f) for f in fops]
# newfops = ["%s,%1.2f,%s"%(terms.group(1), int(terms.group(2)), terms.group(3)) for terms in termslist]
# query['fops'] = json.dumps(newfops)
# print(query)
# templist.append(list(db.results.find(query))[0])
# templist2 = []
# # fopslist = [d['fops'] for d in templist]
# for row in templist:
# # print("row: ", row)
# fops = row['fops']
# costsgl, storage, auctioneer = fops2costs(fops)
# if costsgl not in [600, -3]:
# continue
#
# row['costSGL'] = costsgl
# row['costISL'] = costsgl
# row['storagePenalty'] = storage
# templist2.append(row)
# print(len(templist))
# storage_residual_dict = calResidual(templist, design=design_dict[des], query = query)
# print("Storage residual dict:", len(storage_residual_dict), storage_residual_dict)
# for s, v in storage_residual_dict.items():
# print(s)
# print(v)
# residual_dict[s].append(v)
# print(boxplot_dict)
typ = 'Stochastic' if 'stochastic' in test else 'Deterministic'
xstick = list(range(1,16))
xstick_minor = [2, 5, 8, 11, 14]
xstick_design = ['Design %s'%s for s in ['I', 'II', 'III', 'IV', 'V']]
xstick_storagepenalty = 5 * [400, 800, 'Marginal']
xlines = [3.5, 6.5, 9.5, 12.5]
# print(len(boxplot_dict))
boxplot_list = [b[1] for b in sorted(boxplot_dict.items())]
# print(boxplot_list)
fig = plt.figure()
ax1 = fig.add_subplot(111)
plt.boxplot(boxplot_list, whis = 3)
if typ == 'Stochastic':
for i in range(1, 6):
j = 3*i-1
print(xstick_design[i-1])
# print("800:", stats.f_oneway(boxplot_list[j], boxplot_list[j-1]))
# print("400:", stats.f_oneway(boxplot_list[j], boxplot_list[j-2]))
print("std:", np.std(boxplot_list[j-2]), np.std(boxplot_list[j-1]), np.std(boxplot_list[j]))
print("mean:", np.mean(boxplot_list[j-2]), np.mean(boxplot_list[j-1]), np.mean(boxplot_list[j]))
plt.xticks(xstick, xstick_storagepenalty, rotation = 60)
# plt.xticks(xstick_minor, xstick_design, minor = True)
plt.ylabel('federation value (000)')
for xline in xlines:
plt.axvline(xline, color='k', linestyle='-', linewidth=0.3)
plt.xlabel('Storage Penalty')
ax2 = plt.twiny()
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks([a for a in xstick_minor])
ax2.set_xticklabels(xstick_design)
# plt.savefig("storagepenalty_%s.pdf" %(typ), bbox_inches='tight')
plt.show()
return boxplot_dict
def draw_Dictionary(residual_dict):
plt.figure()
legends = deque()
dic = {e: 'SP:%d' % e for e in residual_dict if e >= 0}
dic[-1] = 'SP:Marginal'
dic[-2] = 'SP:QL'
dic[-3] = 'SP:random'
# dic[400] = 'SP: Fixed'
dic[1200] = 'SP>1000'
dic[0] = 'SP: Collaborative'
baselist = [-1, 400, 800]
# xy = sorted(zip(*[residual_dict[e] for e in baselist], list(range(len(residual_dict[-2])))), reverse=True, key=lambda x: x[1])
xy = zip(*[residual_dict[e] for e in baselist], list(range(len(residual_dict[-1]))))
legends = [dic[e] for e in baselist]
Y = list(zip(*xy))
designs = Y[-1]
# print(designs)
ls = iter(['--', ':', '-.', '-'])
for s, y in zip(baselist, Y[:-1]):
if s == 0:
continue
if s == -1:
plt.scatter(range(len(y)), y, alpha = 0.5, color = 'k', marker = 's', label = dic[s], s = 80)
# plt.plot(y, marker='o')
elif s == 400:
plt.scatter(range(len(y)), y, color = 'm', alpha = 0.5, marker = 'v', label = dic[s], s = 80)
# plt.plot(y, marker='s')
else:
plt.scatter(range(len(y)), y, color = 'g', alpha=0.5, marker='o', label=dic[s], s = 80)
    xtickDict = {0: 'I', 1: 'II', 2:'III', 3:'IV', 4:'V', 5:'VI', 6:'VII', 7:'VIII', 8:'IX', 9:'X'}
xticklist = ['Design %s'%xtickDict[i] for i in list(designs)]
plt.xticks(list(range(len(residual_dict[-1]))),xticklist, rotation = 0)
plt.legend(legends)
for i in range(len(residual_dict[-1])-1):
plt.axvline(i + 0.5, color='k', linestyle='-', linewidth=0.3)
plt.xlim(-0.5, len(residual_dict[-1])-0.5)
def sumDics(db, query, test):
typ = 'Stochastic' if 'stochastic' in test else 'Deterministic'
residual_dict = defaultdict(list)
# residual_dict = runQuery(db, query)
for capacity, links in [(2,2)]:
query['capacity'] = capacity
query['links'] = links
# print('Sum Dics query:')
# print(query)
tempdict = runQuery(db, query, test)
print(len(tempdict))
print([type(t) for t in tempdict])
# for s in tempdict:
# # print("temp dict:", s)
# # print("tempdict seed:", s)
#
# if s in residual_dict:
# residual_dict[s] = [a+b for a,b in zip(residual_dict[s], tempdict[s])]
# else:
# residual_dict[s] = tempdict[s]
#
# print('residual dict s:', residual_dict[s])
# draw_Dictionary(tempdict)
# # plt.title('storage:%d, links:%d' % (capacity, links))
# plt.ylabel('Improvement over baseline (%)')
#
#
# plt.savefig("%s_storagepenalty_%d_%d.pdf" % (typ, capacity, links), bbox_inches='tight')
def avgQueryResults(query):
# receives a query and calculates results as the average of all seeds (global variables)
global db, seed1, seed2
resultslist = []
for seed in range(seed1, seed2):
query['seed'] = seed
# print(query)
resultslist.append(json.loads(list(db.results.find(query))[0]['results']))
final = resultslist[0]
for i, _ in enumerate(final):
# print(i, [len(r) for r in resultslist])
final[i][1] = sum([r[i][1] for r in resultslist])/(seed2 - seed1)
# print(final)
return final
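# Shape assumption for the averaging above: each "results" field decodes to a
# list of [label, cash] pairs (one per federate), so final[i][1] ends up being
# the mean cash of federate i over seeds seed1 .. seed2 - 1, while the labels
# are taken from the first seed's document.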
def drawAdaptiveSGL(query, test):
global divider
federatecash_dict_list1 = []
federatecash_dict_list2 = []
totalcash_dict_list = []
for des in hardcoded_designs:
query['elementlist'] = des
numPlayers = 2
if '3.' in des:
numPlayers = 3
federate_dict_1 = {}
federate_dict_2 = {}
totalcash_dict = {}
# fopslist = list(fopsGen(des, test))
for fops, fops_adaptive in fopsGen(des, test):
print("fops:", fops, fops_adaptive)
query['fops'] = fops
# print(query)
sgl = int(re.search(r"x([-\d]+),.+", fops).group(1))
# print("length of query:", list(db.results.find(query)))
# docs = list(db.results.find(query))[0]
results = avgQueryResults(query)
federatecash_1 = sum([e[1] for e in results])/len(results)
cashlist_2 = []
cashlist_a2 = []
for n in range(len(results)):
tempfops = json.loads(fops_adaptive)
temp = tempfops[0]
tempfops[0] = tempfops[n]
tempfops[n] = temp
query['fops'] = json.dumps(tempfops)
results_adaptive = avgQueryResults(query)
cashlist_a2.append(results_adaptive[n][1])
cashlist_2.extend([r[1] for i, r in enumerate(results_adaptive) if i!=n])
print(cashlist_2)
print(cashlist_a2)
federatecash_2 = sum(cashlist_2)/float(len(cashlist_2))
federatecash_a2 = sum(cashlist_a2)/float(len(cashlist_a2))
# print(query)
# print("length of query:", list(db.results.find(query)))
# docs_a = list(db.results.find(query))[0]
# results = json.loads(docs['results'])
# results = avgQueryResults(query)
# results_adaptive = json.loads(docs_a['results'])
# federatecash_a1 = results_adaptive[0][1]
# federatecash_2 = sum([e[1] for e in results[1:]])/len(results[1:])
# federetecash_a2 = sum([e[1] for e in results_adaptive[1:]])/len(results_adaptive[1:])
totalcash = sum([e[1] for e in results])
totalcash_adaptive = sum([e[1] for e in results_adaptive])
print("Federate cash:", federatecash_1, federatecash_2, federatecash_a2)
federate_dict_1[sgl] = (federatecash_1, federatecash_a2)
federate_dict_2[sgl] = (federatecash_1, federatecash_2)
# print(federatecash_1, federatecash_a1)
# print(federatecash_2, federetecash_a2)
totalcash_dict[sgl] = (totalcash, totalcash_adaptive)
federatecash_dict_list1.append(federate_dict_1)
federatecash_dict_list2.append(federate_dict_2)
totalcash_dict_list.append(totalcash_dict)
    xtickDict = {0: 'I', 1: 'II', 2:'III', 3:'IV', 4:'V', 5:'VI', 6:'VII', 7:'VIII', 8:'IX', 9:'X'}
xticklist = ['Design %s'%xtickDict[i] for i in range(len(hardcoded_designs))]
delta = 0.3
marker_dict = {-3: 'v', 0: '^', 600: 's', 1200: '*' ,'adaptive': 'o'}
color_dict = {-3: 'g', 0: 'r', 600: 'b', 1200: 'm' ,'adaptive': 'k'}
function_dict = {-3: 'CF=tri-random', 0: 'CF= 0', 600: 'CF= 600', 1200: 'CF>1000' ,'adaptive': 'CF=adaptive'}
order_dict = {-3: 4.5, 0: 2, 600: 3, 1200: 4 ,'adaptive': 5}
sp_list = [-3, 0, 600, 1200]
all_points1 = defaultdict(list)
all_points_adaptive1 = defaultdict(list)
all_edges1 = defaultdict(list)
all_points2 = defaultdict(list)
all_edges2 = defaultdict(list)
all_points_adaptive2 = defaultdict(list)
for i, cash_dict in enumerate(federatecash_dict_list1):
for k, v in cash_dict.items():
print("adaptive cash: ", v)
point1 = (i+1-delta, v[0]/divider)
point2 = (i+1+delta, v[1]/divider)
all_points1[k].append(point1)
# all_points1['adaptive'].append(point2)
all_points_adaptive1[k].append(point2)
all_edges1[k].append((point1, point2))
for i, cash_dict in enumerate(federatecash_dict_list2):
for k, v in cash_dict.items():
print("nonadaptive cash: ", v)
point1 = (i+1-delta, v[0]/divider)
point2 = (i+1+delta, v[1]/divider)
all_points2[k].append(point1)
# all_points2['adaptive'].append(point2)
all_points_adaptive2[k].append(point2)
# all_points2['adaptive'].append(point2)
all_edges2[k].append((point1, point2))
legends = []
lines = []
for s in sp_list:
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.5, 0.9, 0.35])
points = all_points1[s]
legends = []
lines = []
# for s, points in sorted(all_points.items(), key = lambda x: order_dict[x[0]]):
lines.append(ax1.scatter(*zip(*points), marker = marker_dict[s], color = color_dict[s], s = 60, facecolors = 'w', linewidth='2'))
legends.append(function_dict[s])
points = all_points_adaptive1[s]
lines.append(ax1.scatter(*zip(*points), marker = marker_dict['adaptive'], color = 'k', s = 60, facecolors = 'w', linewidth='2'))
legends.append(function_dict['adaptive'])
# plt.legend(legends, loc = 2)
# fig.legend(lines, legends, frameon=False, ncol=3, loc='upper center', bbox_to_anchor=(0.4, 1.2), labelspacing=2)
for edge in all_edges1[s]:
# plt.plot(*zip(*edge), 'k:', linewidth = 0.7)
ax1.arrow(edge[0][0], edge[0][1], 0.5*(edge[1][0]-edge[0][0]), 0.5*(edge[1][1]-edge[0][1]), head_width=0.07, head_length=0.2, linewidth = 0.4, fc ='k', ec = 'k', zorder = -1)
plt.xticks(range(1, len(hardcoded_designs)+1), ['' for i in xticklist], rotation = 0)
for i in range(len(hardcoded_designs)-1):
ax1.axvline(i+1.5, color = 'k', linestyle = '-', linewidth = 0.3)
plt.ylabel('adaptive cash (M$)')
plt.xlim(0.5, len(hardcoded_designs)+0.5)
ax2 = fig.add_axes([0.1, 0.1, 0.9, 0.35])
# if s in all_edges2:
points = all_points2[s]
# print(s, points)
# for s, points in sorted(all_points2.items(), key = lambda x: order_dict[x[0]]):
lines.append(ax2.scatter(*zip(*points), marker = marker_dict[s], color = color_dict[s], s = 60, facecolors = 'w', linewidth='2'))
legends.append(function_dict[s])
# elif s in all_points_adaptive:
points = all_points_adaptive2[s]
# print(s, points)
# for s, points in sorted(all_points_adaptive.items(), key = lambda x: order_dict[x[0]]):
lines.append(ax2.scatter(*zip(*points), marker = marker_dict[s], color = 'k', s = 60, facecolors = 'w', linewidth='2'))
legends.append(function_dict[s])
# edge = all_edges2[s]
for edge in all_edges2[s]:
# plt.plot(*zip(*edge), 'k:', linewidth = 0.7)
ax2.arrow(edge[0][0], edge[0][1], 0.5*(edge[1][0]-edge[0][0]), 0.5*(edge[1][1]-edge[0][1]), head_width=0.07, head_length=0.2, linewidth = 0.4, fc ='k', ec = 'k', zorder = -1)
plt.xticks(range(1, len(hardcoded_designs)+1), xticklist, rotation = 0)
for i in range(len(hardcoded_designs)-1):
ax2.axvline(i+1.5, color = 'k', linestyle = '-', linewidth = 0.3)
fig.legend(lines[:2]+lines[3:], legends[:2]+legends[3:], loc='upper center', ncol = 3)
plt.ylabel('non-adaptive (M$)')
plt.savefig("Federate_revenue_costfunction_V3_sp%s.pdf"%str(s), bbox_inches='tight')
plt.show()
# print(federatecash_dict_list1)
# print(totalcash_dict_list)
def drawStoragePenalty(db):
query = {'experiment': 'Storage Penalty V2'}
test = 'regular storage deterministic'
sumDics(db, query, test)
test = 'regular storage stochastic'
sumDics(db, query, test)
plt.show()
def drawFederateAdaptive(db):
global numTurns
query = {'experiment': 'Adaptive Cost V2', 'capacity': 2, 'links': 2, 'numTurns': numTurns}
test = 'federate adaptive'
drawAdaptiveSGL(query, test)
# def drawTotalAdaptive(db):
# query = {'experiment': 'Adaptive Cost', 'capacity': 2, 'links': 2}
# drawTotalAdaptive(query)
def drawAdaptiveAuctioneer(db):
global design_dict, xticklist
query = {'capacity': 2, 'links': 2, 'numTurns': 2400}
totalcash_dict = defaultdict(list)
all_points1 = []
all_points2 = []
all_edges = []
test = 'auctioneer'
divider = 1000000
all_federate_edges = []
for des in hardcoded_designs:
query['elementlist'] = des
numPlayers = 2
if '3.' in des:
numPlayers = 3
for fops_adaptive, fops_auctioneer in fopsGen(des, test):
query['fops'] = json.dumps(fops_adaptive)
query['experiment'] = 'Adaptive Cost'
# print(query)
docs_adaptive = list(db.results.find(query))[0]
query['fops'] = json.dumps(fops_auctioneer)
query['experiment'] = 'Adaptive Cost Auctioneer'
# print(query)
docs_auctioneer = list(db.results.find(query))[0]
results1 = json.loads(docs_adaptive['results'])
results2 = json.loads(docs_auctioneer['results'])
totalcash1 = sum([e[1] for e in results1])
totalcash2 = sum([e[1] for e in results2])
points1 = [(1+design_dict[des]-0.3, e[1]/divider) for e in results1]
points2 = [(1+design_dict[des]+0.3, e[1]/divider) for e in results2]
all_points1.extend(points1)
all_points2.extend(points2)
all_federate_edges.extend(list(zip(points1, points2)))
point1 = (1+design_dict[des]-0.3, totalcash1/divider)
point2 = (1+design_dict[des]+0.3, totalcash2/divider)
totalcash_dict['adaptive'].append(point1)
totalcash_dict['auctioneer'].append(point2)
all_edges.append((point1, point2))
print(totalcash_dict)
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.5, 0.9, 0.35])
ax1.scatter(*zip(*totalcash_dict['adaptive']), marker='s', color='k', s=70, facecolors='w', linewidth='2')
ax1.scatter(*zip(*totalcash_dict['auctioneer']), marker='s', color='k', s=70, facecolors='k', linewidth='2')
plt.legend(['Adaptive (A)', 'A with Auctioneer'])
for i in range(len(hardcoded_designs)-1):
plt.axvline(i+1.5, color = 'k', linestyle = '-', linewidth = 0.3)
for edge in all_edges:
# plt.plot(*zip(*edge), 'k:', linewidth = 0.7)
plt.arrow(edge[0][0], edge[0][1], 0.8*(edge[1][0]-edge[0][0]), 0.8*(edge[1][1]-edge[0][1]), head_width=0.1, head_length=0.1,
linewidth = 1., fc ='k', ec = 'k', zorder = -1, linestyle = ':')
plt.xticks(range(1, len(hardcoded_designs) + 1), ['' for i in xticklist], rotation=0)
plt.ylabel("total cash")
ax2 = fig.add_axes([0.1, 0.1, 0.9, 0.35])
ax2.scatter(*zip(*all_points1), marker='o', color='k', s=60, facecolors='w', linewidth='2')
ax2.scatter(*zip(*all_points2), marker='o', color='k', s=60, facecolors='k', linewidth='2')
plt.legend(['Adaptive Federate (AF)', 'AF with auctioneer'])
for i in range(len(hardcoded_designs) - 1):
plt.axvline(i + 1.5, color='k', linestyle='-', linewidth=0.3)
for edge in all_federate_edges:
# plt.plot(*zip(*edge), 'k:', linewidth = 0.7)
plt.arrow(edge[0][0], edge[0][1], 0.8 * (edge[1][0] - edge[0][0]), 0.8 * (edge[1][1] - edge[0][1]),
head_width=0.1, head_length=0.1, linewidth=1, fc='k', ec='k', zorder=-1, linestyle = ':')
plt.ylabel("federate cash")
plt.xticks(range(1, len(hardcoded_designs) + 1), xticklist, rotation=0)
plt.savefig("TotalCash_Adaptive_vs_Auctioneer.pdf", bbox_inches='tight')
# plt.show()
def drawStorageCoefficient(db):
global xticklist, hardcoded_designs, divider
# print(xticklist)
query = {'experiment': 'Storage Penalty', 'capacity': 2, 'links': 2, 'numTurns': 2400}
for j, des in enumerate(hardcoded_designs):
# print(des)
# print(xticklist[j])
query['elementlist'] = des
numPlayers = 2
if '3.' in des:
numPlayers = 3
coefreslist = []
pricereslist = []
legends = []
for test in ['storage stochastic', 'storage deterministic']:
coefresulttuples = []
priceresultsdict = {}
for fops in fopsGen(des, test):
# print(fops)
query['fops'] = json.dumps(fops)
query['experiment'] = 'Storage Penalty'
# print(query)
docs = list(db.results.find(query))
# print(query, len(docs))
docs = docs[0]
# print("length of docs:", len(docs))
results = json.loads(docs['results'])
totalcash = sum([e[1] for e in results])/divider
k = float(re.search(r'x.+,([-\.\d]+),.+', fops[0]).group(1))
# print(k)
if k<0:
coefresulttuples.append((abs(k), totalcash))
else:
priceresultsdict[k] = totalcash
coefreslist.append(coefresulttuples)
pricereslist.append(priceresultsdict)
legends.append(test)
# print(coefresulttuples)
# print(priceresultsdict)
plt.figure()
# coefresulttuples = sorted(coefresulttuples)
plt.plot(*list(zip(*sorted(coefreslist[0]))))
plt.plot(*list(zip(*sorted(coefreslist[1]))))
stochasticMAX = max(pricereslist[0].items(), key = lambda x: x[1])
deterministicMAX = max(pricereslist[1].items(), key = lambda x: x[1])
plt.axhline(deterministicMAX[1], linestyle = '--', c = 'r')
legends.append("deter-cost SP:%d"%deterministicMAX[0])
plt.axhline(stochasticMAX[1], linestyle = '-.', c = 'b')
legends.append("stoch-cost SP:%d"%stochasticMAX[0])
plt.legend(legends)
plt.title("%s"%(xticklist[j]))
plt.ylabel('total cash')
plt.xlabel('storage coefficient')
plt.savefig("storagepenalty_coefficient_%s.pdf"%xticklist[j], bbox_inches='tight')
plt.show()
def drawGraphbyDesign(number, design):
elements = design.split(' ')
federates = set([int(e[0]) for e in elements])
federates_location_dict = defaultdict(list)
federates_type_dict = defaultdict(list)
federate_coordinates_dict = defaultdict(list)
my_dpi = 150
plt.figure(figsize=(800/my_dpi, 800/my_dpi), dpi=my_dpi)
for r in [4, 2.25, 1.]:
x = np.linspace(-1.0*r, 1.0*r, 50)
y = np.linspace(-1.0*r, 1.0*r, 50)
X, Y = np.meshgrid(x, y)
F = X ** 2 + Y ** 2 - r
plt.contour(X, Y, F, [0], colors='k', linewidths = 0.3, origin = 'lower', zorder = -1)
font = FontProperties()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-small')
for x,y,lab in [(0,0,'SUR'), (0, 1, "LEO"),(0, 1.5, 'MEO'),(0, 2, 'GEO')]:
# plt.annotate(lab, xy = (x,y), xytext = (x-0.2, y-0.1))
plt.text(x,y, ha="center", va="center", s = lab, bbox = dict(fc="w", ec="w", lw=2),fontproperties=font)
for i, (x, y) in enumerate([convertLocation2xy(e) for e in ['OOO'+str(i) for i in range(1,7)]]):
plt.text(x, y, ha="center", va="center", s=str(i+1), bbox=dict(fc="none", ec="none", lw=2), fontproperties=font)
font.set_size('medium')
plt.text(0, 2.3 , ha="left", va="center", s=r'$|\rightarrow \theta$', bbox=dict(fc="w", ec="w", lw=2), fontproperties=font)
types_dict = {'GroundSta': "G", 'Sat': 'S'}
colordict = {'F1': 'yellow', 'F2': 'lightcyan', 'F3': 'lightgrey'}
allpossiblelocations = []
for location in ['SUR', 'LEO', 'MEO', 'GEO']:
for i in range(1,7):
allpossiblelocations.append(location + str(i))
allpossiblecoordinates = [convertLocation2xy(e) for e in allpossiblelocations]
plt.scatter(*zip(*allpossiblecoordinates), marker = "H", s = 800, color = 'k', facecolors = 'w')
for f in federates:
types = [re.search(r'\d\.(.+)@(\w+\d)', e).group(1) for e in elements if '%d.' % f in e]
federates_type_dict['F%d'%f] = [types_dict[t] for t in types]
federates_location_dict['F%d'%f] = [re.search(r'(.+)@(\w+\d)', e).group(2) for e in elements if '%d.'%f in e]
federate_coordinates_dict['F%d'%f] = [convertLocation2xy(loc) for loc in federates_location_dict['F%d'%f]]
plt.scatter(*zip(*federate_coordinates_dict['F%d'%f]), marker = "H", s = 800, edgecolors = 'k', facecolors = colordict['F%d'%f], linewidth='3')
for x, y in federate_coordinates_dict['F%d'%f]:
plt.annotate('F%d'%f, xy = (x, y), xytext = (x-0.1, y-0.075))
plt.xticks([])
plt.yticks([])
rlim = 2.5
plt.xlim(-rlim, rlim)
plt.ylim(-rlim+0.2, rlim)
plt.axis('off')
des_roman_dict = {1: 'I', 2: 'II', 3:'III', 4:'IV', 5:'V'}
plt.savefig("Design_%s.pdf"%des_roman_dict[number], bbox_inches='tight')
# db = None
# dbHost = socket.gethostbyname(socket.gethostname())
# dbHost = "127.0.0.1"
# # dbHost = "155.246.119.10"
# dbName = None
# dbPort = 27017
#
# db = pymongo.MongoClient(dbHost, dbPort).ofs
# seed1 = 20
# seed2 = 30
# numTurns = 2400
# divider = 1000000
# # #
# drawStoragePenalty(db)
# #
# # drawFederateAdaptive(db)
# #
# # drawTotalAdaptive({'experiment': 'Adaptive Cost', 'capacity': 2, 'links': 2, 'numTurns': 2400})
# #
# # drawAdaptiveAuctioneer(db)
# #
# # drawSampleNetwork()
# #
# # drawStorageCoefficient(db)
#
# # for i, des in enumerate(hardcoded_designs):
# # # drawGraphbyDesign(i+1, des)
|
|
# Copyright 2013 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements all necessary Impala HiveServer 2 RPC functionality."""
# This work builds off of:
# 1. the Hue interface:
# hue/apps/beeswax/src/beeswax/server/dbms.py
# hue/apps/beeswax/src/beeswax/server/hive_server2_lib.py
# hue/desktop/core/src/desktop/lib/thrift_util.py
# 2. the Impala shell:
# Impala/shell/original_impala_shell.py
from __future__ import absolute_import, division
import datetime
import socket
import operator
import os
import re
import six
import sys
from decimal import Decimal
from six.moves import range
from impala.error import HiveServer2Error
from impala._thrift_api.hiveserver2 import (
TSocket, TBufferedTransport, TTransportException, TBinaryProtocol,
TOpenSessionReq, TFetchResultsReq, TCloseSessionReq, TExecuteStatementReq,
TGetInfoReq, TGetInfoType, TTypeId, TFetchOrientation,
TGetResultSetMetadataReq, TStatusCode, TGetColumnsReq, TGetSchemasReq,
TGetTablesReq, TGetFunctionsReq, TGetOperationStatusReq, TOperationState,
TCancelOperationReq, TCloseOperationReq, TGetLogReq, TProtocolVersion,
TGetRuntimeProfileReq, TGetExecSummaryReq, ImpalaHiveServer2Service,
TExecStats, ThriftClient)
# mapping between the schema types (based on
# com.cloudera.impala.catalog.PrimitiveType) and TColumnValue (in returned
# rows) helper object for converting from TRow to something friendlier
_TTypeId_to_TColumnValue_getters = {
'BOOLEAN': operator.attrgetter('boolVal'),
'TINYINT': operator.attrgetter('byteVal'),
'SMALLINT': operator.attrgetter('i16Val'),
'INT': operator.attrgetter('i32Val'),
'BIGINT': operator.attrgetter('i64Val'),
'TIMESTAMP': operator.attrgetter('stringVal'),
'FLOAT': operator.attrgetter('doubleVal'),
'DOUBLE': operator.attrgetter('doubleVal'),
'STRING': operator.attrgetter('stringVal'),
'DECIMAL': operator.attrgetter('stringVal'),
'BINARY': operator.attrgetter('binaryVal'),
'VARCHAR': operator.attrgetter('stringVal'),
'CHAR': operator.attrgetter('stringVal'),
}
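# Example of how the getters above are used (both shapes occur in this file):
# for HS2 protocol V6 the getter is applied to a TColumn from a columnar
# result set (e.g. attrgetter('i32Val') yields an object with .values/.nulls),
# while for V1-V5 it is applied to a TColumnValue and the scalar is read from
# its .value attribute.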
_pre_columnar_protocols = [
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V3,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V4,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V5,
]
def err_if_rpc_not_ok(resp):
if (resp.status.statusCode != TStatusCode.SUCCESS_STATUS and
resp.status.statusCode != TStatusCode.SUCCESS_WITH_INFO_STATUS and
resp.status.statusCode != TStatusCode.STILL_EXECUTING_STATUS):
raise HiveServer2Error(resp.status.errorMessage)
# datetime only supports 6 digits of microseconds but Impala supports 9.
# If present, the trailing 3 digits will be ignored without warning.
_TIMESTAMP_PATTERN = re.compile(r'(\d+-\d+-\d+ \d+:\d+:\d+(\.\d{,6})?)')
def _parse_timestamp(value):
if value:
match = _TIMESTAMP_PATTERN.match(value)
if match:
if match.group(2):
format = '%Y-%m-%d %H:%M:%S.%f'
# use the pattern to truncate the value
value = match.group()
else:
format = '%Y-%m-%d %H:%M:%S'
value = datetime.datetime.strptime(value, format)
else:
raise Exception(
'Cannot convert "{}" into a datetime'.format(value))
else:
value = None
return value
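# Illustrative behaviour of _parse_timestamp (example values, not from tests):
#   _parse_timestamp('2014-03-01 12:30:45.123456789')
#       -> datetime.datetime(2014, 3, 1, 12, 30, 45, 123456)  # last 3 digits dropped
#   _parse_timestamp('2014-03-01 12:30:45') -> datetime without microseconds
#   _parse_timestamp('') -> None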
# TODO: Add another decorator that runs the function in its own thread
def threaded(func):
raise NotImplementedError
def retry(func):
# Retries RPCs after closing/reopening transport
# `service` must be the first arg in args or must be a kwarg
def wrapper(*args, **kwargs):
# get the thrift transport
if 'service' in kwargs:
transport = kwargs['service']._iprot.trans
elif len(args) > 0 and isinstance(args[0], ThriftClient):
transport = args[0]._iprot.trans
else:
raise HiveServer2Error(
"RPC function does not have expected 'service' arg")
        tries_left = 3
        last_exc = None
        while tries_left > 0:
            try:
                if six.PY2 and not transport.isOpen():
                    transport.open()
                elif six.PY3 and not transport.is_open():
                    transport.open()
                return func(*args, **kwargs)
            except (socket.error, TTransportException) as exc:
                # Transport-level failure: remember it, reopen and retry.
                last_exc = exc
            except Exception:
                raise
            transport.close()
            tries_left -= 1
        # All retries exhausted; re-raise the last transport error (a bare
        # `raise` here would have no active exception to re-raise).
        raise last_exc
return wrapper
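# Usage sketch for @retry (hypothetical helper, mirroring the RPC wrappers
# defined below): the Thrift client must be the first positional argument or
# passed as service=..., so the wrapper can locate and reopen its transport.
#
#   @retry
#   def my_ping(service, session_handle):
#       resp = service.GetInfo(TGetInfoReq(sessionHandle=session_handle,
#                                          infoType=TGetInfoType.CLI_SERVER_NAME))
#       err_if_rpc_not_ok(resp)
#       return resp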
def _get_socket(host, port, use_ssl, ca_cert):
# based on the Impala shell impl
if use_ssl:
from thrift.transport.TSSLSocket import TSSLSocket
if ca_cert is None:
return TSSLSocket(host, port, validate=False)
else:
return TSSLSocket(host, port, validate=True, ca_certs=ca_cert)
else:
return TSocket(host, port)
def _get_transport(sock, host, use_ldap, ldap_user, ldap_password,
use_kerberos, kerberos_service_name):
# based on the Impala shell impl
if not use_ldap and not use_kerberos:
return TBufferedTransport(sock)
import sasl
from thrift_sasl import TSaslClientTransport
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", host)
if use_ldap:
sasl_client.setAttr("username", ldap_user)
sasl_client.setAttr("password", ldap_password)
else:
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
if use_kerberos:
return TSaslClientTransport(sasl_factory, "GSSAPI", sock)
else:
return TSaslClientTransport(sasl_factory, "PLAIN", sock)
def connect_to_impala(host, port, timeout=45, use_ssl=False, ca_cert=None,
use_ldap=False, ldap_user=None, ldap_password=None,
use_kerberos=False, kerberos_service_name='impala'):
sock = _get_socket(host, port, use_ssl, ca_cert)
if six.PY2:
sock.setTimeout(timeout * 1000.)
elif six.PY3:
sock.set_timeout(timeout * 1000.)
transport = _get_transport(sock, host, use_ldap, ldap_user, ldap_password,
use_kerberos, kerberos_service_name)
transport.open()
protocol = TBinaryProtocol(transport)
if six.PY2:
# ThriftClient == ImpalaHiveServer2Service.Client
service = ThriftClient(protocol)
elif six.PY3:
# ThriftClient == TClient
service = ThriftClient(ImpalaHiveServer2Service, protocol)
return service
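# Rough end-to-end sketch (host, port and user are placeholders, not defaults):
#
#   service = connect_to_impala('impala-host', 21050)
#   session, _, hs2_version = open_session(service, user='dev')
#   op = execute_statement(service, session, 'SELECT 1')
#   rows = fetch_results(service, op, hs2_version)
#   close_operation(service, op)
#   close_session(service, session)
#   close_service(service)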
def close_service(service):
service._iprot.trans.close()
def reconnect(service):
service._iprot.trans.close()
service._iprot.trans.open()
@retry
def open_session(service, user, configuration=None):
req = TOpenSessionReq(
client_protocol=TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
username=user, configuration=configuration)
resp = service.OpenSession(req)
err_if_rpc_not_ok(resp)
return (resp.sessionHandle, resp.configuration, resp.serverProtocolVersion)
@retry
def close_session(service, session_handle):
req = TCloseSessionReq(sessionHandle=session_handle)
resp = service.CloseSession(req)
err_if_rpc_not_ok(resp)
@retry
def execute_statement(service, session_handle, statement, configuration=None,
                      run_async=False):
    # `async` became a reserved keyword in Python 3.7, so the flag is exposed
    # here as run_async and mapped onto the Thrift runAsync field.
    req = TExecuteStatementReq(sessionHandle=session_handle,
                               statement=statement, confOverlay=configuration,
                               runAsync=run_async)
resp = service.ExecuteStatement(req)
err_if_rpc_not_ok(resp)
return resp.operationHandle
@retry
def get_result_schema(service, operation_handle):
if not operation_handle.hasResultSet:
return None
req = TGetResultSetMetadataReq(operationHandle=operation_handle)
resp = service.GetResultSetMetadata(req)
err_if_rpc_not_ok(resp)
schema = []
for column in resp.schema.columns:
name = column.columnName
entry = column.typeDesc.types[0].primitiveEntry
type_ = TTypeId._VALUES_TO_NAMES[entry.type].split('_')[0]
if type_ == 'DECIMAL':
qualifiers = entry.typeQualifiers.qualifiers
precision = qualifiers['precision'].i32Value
scale = qualifiers['scale'].i32Value
schema.append((name, type_, None, None,
precision, scale, None))
else:
schema.append((name, type_, None, None, None, None, None))
return schema
@retry
def fetch_results(service, operation_handle, hs2_protocol_version, schema=None,
max_rows=1024, orientation=TFetchOrientation.FETCH_NEXT):
if not operation_handle.hasResultSet:
return None
# the schema is necessary to pull the proper values (i.e., coalesce)
if schema is None:
schema = get_result_schema(service, operation_handle)
req = TFetchResultsReq(operationHandle=operation_handle,
orientation=orientation,
maxRows=max_rows)
resp = service.FetchResults(req)
err_if_rpc_not_ok(resp)
if hs2_protocol_version == TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6:
tcols = [_TTypeId_to_TColumnValue_getters[schema[i][1]](col)
for (i, col) in enumerate(resp.results.columns)]
num_cols = len(tcols)
num_rows = len(tcols[0].values)
rows = []
for i in range(num_rows):
row = []
for j in range(num_cols):
type_ = schema[j][1]
values = tcols[j].values
nulls = tcols[j].nulls
# i // 8 is the byte, i % 8 is position in the byte; get the
# int repr and pull out the bit at the corresponding pos
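                    # Worked example (assumed values): for row i = 10 the flag
                    # lives in byte 10 // 8 == 1, bit 10 % 8 == 2, so the row
                    # is NULL iff ord(nulls[1]) & (1 << 2) is non-zero.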
is_null = False
                    # Ref HUE-2722: HiveServer2 sometimes does not put the
                    # trailing '\x00' padding, so pad the nulls bitmap here.
if len(values) != len(nulls):
nulls = nulls + ('\x00' * (len(values) - len(nulls)))
# Hive encodes nulls differently than Impala
# (\x00 vs \x00\x00 ...)
if not re.match('^(\x00)+$', nulls):
is_null = ord(nulls[i // 8]) & (1 << (i % 8))
if is_null:
row.append(None)
elif type_ == 'TIMESTAMP':
row.append(_parse_timestamp(values[i]))
elif type_ == 'DECIMAL':
row.append(Decimal(values[i]))
else:
row.append(values[i])
rows.append(tuple(row))
elif hs2_protocol_version in _pre_columnar_protocols:
rows = []
for trow in resp.results.rows:
row = []
for (i, col_val) in enumerate(trow.colVals):
type_ = schema[i][1]
value = _TTypeId_to_TColumnValue_getters[type_](col_val).value
if type_ == 'TIMESTAMP':
value = _parse_timestamp(value)
elif type_ == 'DECIMAL':
if value:
value = Decimal(value)
row.append(value)
rows.append(tuple(row))
else:
raise HiveServer2Error(
("Got HiveServer2 version %s. " %
TProtocolVersion._VALUES_TO_NAMES[hs2_protocol_version]) +
"Expected V1 - V6")
return rows
@retry
def get_current_database(service, session_handle):
raise NotImplementedError
@retry
def get_databases(service, session_handle):
req = TGetSchemasReq(sessionHandle=session_handle, schemaName='.*')
resp = service.GetSchemas(req)
err_if_rpc_not_ok(resp)
return resp.operationHandle
@retry
def database_exists(service, session_handle, hs2_protocol_version, db_name):
req = TGetSchemasReq(sessionHandle=session_handle, schemaName=db_name)
resp = service.GetSchemas(req)
err_if_rpc_not_ok(resp)
operation_handle = resp.operationHandle
# this only fetches default max_rows, but there should only be one row
# ideally
results = fetch_results(service=service, operation_handle=operation_handle,
hs2_protocol_version=hs2_protocol_version)
exists = False
for result in results:
if result[0].lower() == db_name.lower():
exists = True
close_operation(service, operation_handle)
return exists
@retry
def get_tables(service, session_handle, database_name='.*'):
req = TGetTablesReq(sessionHandle=session_handle, schemaName=database_name,
tableName='.*')
resp = service.GetTables(req)
err_if_rpc_not_ok(resp)
return resp.operationHandle
@retry
def table_exists(service, session_handle, hs2_protocol_version, table_name,
database_name='.*'):
req = TGetTablesReq(sessionHandle=session_handle, schemaName=database_name,
tableName=table_name)
resp = service.GetTables(req)
err_if_rpc_not_ok(resp)
operation_handle = resp.operationHandle
# this only fetches default max_rows, but there should only be one row
# ideally
results = fetch_results(service=service, operation_handle=operation_handle,
hs2_protocol_version=hs2_protocol_version)
exists = False
for result in results:
if result[2].lower() == table_name.lower():
exists = True
close_operation(service, operation_handle)
return exists
@retry
def get_table_schema(service, session_handle, table_name, database_name='.*'):
req = TGetColumnsReq(sessionHandle=session_handle,
schemaName=database_name, tableName=table_name,
columnName='.*')
resp = service.GetColumns(req)
err_if_rpc_not_ok(resp)
return resp.operationHandle
@retry
def get_functions(service, session_handle, database_name='.*'):
# TODO: need to test this one especially
req = TGetFunctionsReq(sessionHandle=session_handle,
schemaName=database_name,
functionName='.*')
resp = service.GetFunctions(req)
err_if_rpc_not_ok(resp)
return resp.operationHandle
@retry
def get_operation_status(service, operation_handle):
req = TGetOperationStatusReq(operationHandle=operation_handle)
resp = service.GetOperationStatus(req)
err_if_rpc_not_ok(resp)
return TOperationState._VALUES_TO_NAMES[resp.operationState]
@retry
def cancel_operation(service, operation_handle):
req = TCancelOperationReq(operationHandle=operation_handle)
resp = service.CancelOperation(req)
err_if_rpc_not_ok(resp)
@retry
def close_operation(service, operation_handle):
req = TCloseOperationReq(operationHandle=operation_handle)
resp = service.CloseOperation(req)
err_if_rpc_not_ok(resp)
@retry
def get_log(service, operation_handle):
req = TGetLogReq(operationHandle=operation_handle)
resp = service.GetLog(req)
err_if_rpc_not_ok(resp)
return resp.log
def ping(service, session_handle):
req = TGetInfoReq(sessionHandle=session_handle,
infoType=TGetInfoType.CLI_SERVER_NAME)
try:
resp = service.GetInfo(req)
except TTransportException:
return False
try:
err_if_rpc_not_ok(resp)
except HiveServer2Error:
return False
return True
def get_profile(service, operation_handle, session_handle):
req = TGetRuntimeProfileReq(operationHandle=operation_handle,
sessionHandle=session_handle)
resp = service.GetRuntimeProfile(req)
err_if_rpc_not_ok(resp)
return resp.profile
def get_summary(service, operation_handle, session_handle):
req = TGetExecSummaryReq(operationHandle=operation_handle,
sessionHandle=session_handle)
resp = service.GetExecSummary(req)
err_if_rpc_not_ok(resp)
return resp.summary
def build_summary_table(summary, idx, is_fragment_root, indent_level, output):
"""Direct translation of Coordinator::PrintExecSummary() to recursively
build a list of rows of summary statistics, one per exec node
summary: the TExecSummary object that contains all the summary data
idx: the index of the node to print
is_fragment_root: true if the node to print is the root of a fragment (and
therefore feeds into an exchange)
indent_level: the number of spaces to print before writing the node's
label, to give the appearance of a tree. The 0th child of a node has the
same indent_level as its parent. All other children have an indent_level
of one greater than their parent.
output: the list of rows into which to append the rows produced for this
node and its children.
Returns the index of the next exec node in summary.exec_nodes that should
be processed, used internally to this method only.
"""
attrs = ["latency_ns", "cpu_time_ns", "cardinality", "memory_used"]
# Initialise aggregate and maximum stats
agg_stats, max_stats = TExecStats(), TExecStats()
for attr in attrs:
setattr(agg_stats, attr, 0)
setattr(max_stats, attr, 0)
node = summary.nodes[idx]
for stats in node.exec_stats:
for attr in attrs:
val = getattr(stats, attr)
if val is not None:
setattr(agg_stats, attr, getattr(agg_stats, attr) + val)
setattr(max_stats, attr, max(getattr(max_stats, attr), val))
if len(node.exec_stats) > 0:
avg_time = agg_stats.latency_ns / len(node.exec_stats)
else:
avg_time = 0
# If the node is a broadcast-receiving exchange node, the cardinality of
# rows produced is the max over all instances (which should all have
# received the same number of rows). Otherwise, the cardinality is the sum
# over all instances which process disjoint partitions.
if node.is_broadcast and is_fragment_root:
cardinality = max_stats.cardinality
else:
cardinality = agg_stats.cardinality
est_stats = node.estimated_stats
label_prefix = ""
if indent_level > 0:
label_prefix = "|"
if is_fragment_root:
label_prefix += " " * indent_level
else:
label_prefix += "--" * indent_level
def prettyprint(val, units, divisor):
for unit in units:
if val < divisor:
if unit == units[0]:
return "%d%s" % (val, unit)
else:
return "%3.2f%s" % (val, unit)
val /= divisor
def prettyprint_bytes(byte_val):
return prettyprint(
byte_val, [' B', ' KB', ' MB', ' GB', ' TB'], 1024.0)
def prettyprint_units(unit_val):
return prettyprint(unit_val, ["", "K", "M", "B"], 1000.0)
def prettyprint_time(time_val):
return prettyprint(time_val, ["ns", "us", "ms", "s"], 1000.0)
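    # For instance (illustrative values): prettyprint_bytes(3145728) -> '3.00 MB',
    # prettyprint_units(2500) -> '2.50K', prettyprint_time(1500) -> '1.50us',
    # and values below the first divisor keep integer formatting, e.g.
    # prettyprint_units(500) -> '500'.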
row = [label_prefix + node.label,
len(node.exec_stats),
prettyprint_time(avg_time),
prettyprint_time(max_stats.latency_ns),
prettyprint_units(cardinality),
prettyprint_units(est_stats.cardinality),
prettyprint_bytes(max_stats.memory_used),
prettyprint_bytes(est_stats.memory_used),
node.label_detail]
output.append(row)
try:
sender_idx = summary.exch_to_sender_map[idx]
# This is an exchange node, so the sender is a fragment root, and
# should be printed next.
build_summary_table(summary, sender_idx, True, indent_level, output)
except (KeyError, TypeError):
# Fall through if idx not in map, or if exch_to_sender_map itself is
# not set
pass
idx += 1
if node.num_children > 0:
first_child_output = []
idx = build_summary_table(summary, idx, False, indent_level,
first_child_output)
for child_idx in range(1, node.num_children):
# All other children are indented (we only have 0, 1 or 2 children
# for every exec node at the moment)
idx = build_summary_table(summary, idx, False, indent_level + 1,
output)
output += first_child_output
return idx
|
|
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import webob
from nova.api.openstack import compute as compute_api
from nova import compute
from nova import context
from nova import exception
from nova import flags
from nova.scheduler import api as scheduler_api
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
FLAGS = flags.FLAGS
INSTANCE = {
"id": 1,
"name": "fake",
"display_name": "test_server",
"uuid": "abcd",
"user_id": 'fake_user_id',
"tenant_id": 'fake_tenant_id',
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"security_groups": [{"id": 1, "name": "test"}],
"progress": 0,
"image_ref": 'http://foo.com/123',
"fixed_ips": [],
"instance_type": {"flavorid": '124'},
}
def fake_compute_api(*args, **kwargs):
return True
def fake_compute_api_raises_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState
def fake_compute_api_get(self, context, instance_id):
return {'id': 1, 'uuid': instance_id}
def fake_scheduler_api_live_migration(context, block_migration,
disk_over_commit, instance_id,
dest, topic):
return None
class AdminActionsTest(test.TestCase):
_actions = ('pause', 'unpause', 'suspend', 'resume', 'migrate',
'resetNetwork', 'injectNetworkInfo', 'lock', 'unlock')
_methods = ('pause', 'unpause', 'suspend', 'resume', 'resize',
'reset_network', 'inject_network_info', 'lock', 'unlock')
_actions_that_check_state = (
# action, method
('pause', 'pause'),
('unpause', 'unpause'),
('suspend', 'suspend'),
('resume', 'resume'),
('migrate', 'resize'))
def setUp(self):
super(AdminActionsTest, self).setUp()
self.stubs.Set(compute.API, 'get', fake_compute_api_get)
self.UUID = utils.gen_uuid()
for _method in self._methods:
self.stubs.Set(compute.API, _method, fake_compute_api)
self.stubs.Set(scheduler_api,
'live_migration',
fake_scheduler_api_live_migration)
def test_admin_api_actions(self):
app = fakes.wsgi_app()
for _action in self._actions:
req = webob.Request.blank('/v2/fake/servers/%s/action' %
self.UUID)
req.method = 'POST'
req.body = json.dumps({_action: None})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 202)
def test_admin_api_actions_raise_conflict_on_invalid_state(self):
app = fakes.wsgi_app()
for _action, _method in self._actions_that_check_state:
self.stubs.Set(compute.API, _method,
fake_compute_api_raises_invalid_state)
req = webob.Request.blank('/v2/fake/servers/%s/action' %
self.UUID)
req.method = 'POST'
req.body = json.dumps({_action: None})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 409)
self.assertIn("invalid state for '%(_action)s'" % locals(),
res.body)
def test_migrate_live_enabled(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = json.dumps({'os-migrateLive': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 202)
def test_migrate_live_missing_dict_param(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = json.dumps({'os-migrateLive': {'dummy': 'hostname',
'block_migration': False,
'disk_over_commit': False}})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
class CreateBackupTests(test.TestCase):
def setUp(self):
super(CreateBackupTests, self).setUp()
self.stubs.Set(compute.API, 'get', fake_compute_api_get)
self.backup_stubs = fakes.stub_out_compute_api_backup(self.stubs)
self.app = compute_api.APIRouter()
self.uuid = utils.gen_uuid()
def _get_request(self, body):
url = '/fake/servers/%s/action' % self.uuid
req = fakes.HTTPRequest.blank(url)
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
return req
def test_create_backup_with_metadata(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': {'123': 'asdf'},
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 202)
self.assertTrue(response.headers['Location'])
def test_create_backup_with_too_much_metadata(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': {'123': 'asdf'},
},
}
for num in range(FLAGS.quota_metadata_items + 1):
body['createBackup']['metadata']['foo%i' % num] = "bar"
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 413)
def test_create_backup_no_name(self):
"""Name is required for backups"""
body = {
'createBackup': {
'backup_type': 'daily',
'rotation': 1,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_no_rotation(self):
"""Rotation is required for backup requests"""
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_no_backup_type(self):
"""Backup Type (daily or weekly) is required for backup requests"""
body = {
'createBackup': {
'name': 'Backup 1',
'rotation': 1,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_bad_entity(self):
body = {'createBackup': 'go'}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup(self):
"""The happy path for creating backups"""
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertTrue(response.headers['Location'])
def test_create_backup_raises_conflict_on_invalid_state(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
self.stubs.Set(compute.API, 'backup',
fake_compute_api_raises_invalid_state)
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 409)
|
|
#! /usr/local/bin/stackless2.6
# by [email protected] at Fri May 14 14:22:30 CEST 2010
"""Interactive Python console for Syncless and coroutines.
Example invocation:
$ python -m syncless.console
See more on http://code.google.com/p/syncless/wiki/Console .
"""
__author__ = '[email protected] (Peter Szabo)'
import array
import code
import errno
import fcntl
import os
import re
import select
import site
import struct
import sys
import termios
from syncless import coio
from syncless import patch
from syncless import wsgi
def BlockingWriteAll(out_fd, data):
"""A blocking version to write all of data to out_fd."""
while len(data):
got = os.write(out_fd, data)
if got == len(data):
break
data = buffer(data, got)
PROMPT_ANSI_RE = re.compile(r'\x1b\[[0-9;]*m')
def GetPromptWidth(prompt):
"""Return the display width of a prompt."""
# We could be much smarter here, e.g. interpret '\n' and '\r' to jump to
# 0, detect UTF-8, and count UTF-8 characters only.
  if '\x1b' in prompt:
    # Get rid of ESC[...m (ANSI escape sequence character mode formatting).
    prompt = PROMPT_ANSI_RE.sub('', prompt)
return len(prompt)
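# For example, with the ANSI-colored prompt '\x1b[1;32m>>> \x1b[0m' the escape
# sequences are stripped and GetPromptWidth() returns 4 (the width of '>>> ').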
def WritePromptToNextLine(out_fd, prompt, prompt_width):
packed_hh = struct.pack('hh', 0, 0)
try:
width = struct.unpack('hh', fcntl.ioctl(
out_fd, termios.TIOCGWINSZ, packed_hh))[1]
except (IOError, OSError, ValueError, IndexError, struct.error):
width = None
if width and width > 1 + prompt_width:
    # Move cursor to the beginning of the (next) line. This trick is
    # also done by zsh(1).
BlockingWriteAll(
out_fd, '%%%s\r%s' %
(' ' * (width - 1 - prompt_width), prompt))
else:
# Move cursor to the beginning of the line.
BlockingWriteAll(out_fd, '\r' + prompt)
just_after_prompt = False
"""False or (out_fd, prompt).
Boolean value indicates if a prompt was displayed but nothing typed.
"""
def NewReadLine(in_fd, out_fd):
"""Terminal-enhanced readline function generator.
Tested on Linux 2.6.
"""
xin = coio.fdopen(in_fd, 'r', do_close=False)
packed_i = struct.pack('i', 0)
def NonTerminalReadLine(prompt=''):
xout.write(prompt)
xout.flush()
# Coroutines are scheduled while xin.readline() is reading the rest of
# its line.
line = xin.readline()
if line:
return line.rstrip('\n')
raise EOFError
def TerminalReadLine(prompt=''):
old = termios.tcgetattr(0)
new = list(old)
new[6] = list(new[6]) # Copy sublist.
#print 'READLINE', prompt
    new[3] &= ~termios.ECHO # [3] is c_lflag
    new[3] &= ~termios.ICANON # [3] is c_lflag
#new[6][termios.VMIN] = '\0' # !! VMIN -- no effect below, affects only blocking / nonblocking reads
termios.tcsetattr(0, termios.TCSANOW, new)
BlockingWriteAll(out_fd, prompt)
global just_after_prompt
just_after_prompt = (out_fd, prompt)
try:
while not xin.wait_for_readable():
pass
finally:
just_after_prompt = False
# Is this the correct way to disable new input while we're examining the
# existing input?
termios.tcflow(in_fd, termios.TCIOFF)
nread = struct.unpack('i', fcntl.ioctl(
in_fd, termios.FIONREAD, packed_i))[0]
# We read more than 1 character here so that we can push all characters in
# an escape sequence back.
got = xin.read_at_most(nread)
if got in ('\r', '\n'): # Helps GNU libreadline a bit.
BlockingWriteAll(out_fd, '\n')
return ''
if '\x04' in got: # Got EOF (isn't handled well here by readline).
      new[3] |= termios.ECHO # [3] is c_lflag; this is needed by readline.so
      new[3] |= termios.ICANON # [3] is c_lflag; superfluous
termios.tcsetattr(0, termios.TCSANOW, new)
for c in got:
fcntl.ioctl(in_fd, termios.TIOCSTI, c)
termios.tcflow(in_fd, termios.TCION)
raise EOFError
prompt_width = GetPromptWidth(prompt)
if 'readline' in sys.modules: # raw_input() is GNU libreadline.
WritePromptToNextLine(out_fd, '', prompt_width)
      new[3] |= termios.ICANON # [3] is c_lflag; superfluous
termios.tcsetattr(0, termios.TCSANOW, new)
for c in got:
fcntl.ioctl(in_fd, termios.TIOCSTI, c)
      new[3] |= termios.ECHO # [3] is c_lflag; this is needed by readline.so
termios.tcsetattr(0, termios.TCSANOW, new)
termios.tcflow(in_fd, termios.TCION)
# The disadvantage of the GNU libreadline implementation of
# raw_input() here is that coroutines are not scheduled while readline
# is reading the prompt (the non-first character).
try:
return raw_input(prompt)
finally:
termios.tcsetattr(in_fd, termios.TCSANOW, old)
else:
WritePromptToNextLine(out_fd, prompt, prompt_width)
      new[3] |= termios.ECHO # [3] is c_lflag; this is needed by readline.so
      new[3] |= termios.ICANON # [3] is c_lflag; superfluous
termios.tcsetattr(0, termios.TCSANOW, new)
for c in got:
fcntl.ioctl(in_fd, termios.TIOCSTI, c)
termios.tcflow(in_fd, termios.TCION)
if False:
# Coroutines are scheduled in xin.readline(), so this would be
# incompatible with raw_input() above.
try:
line = xin.readline()
finally:
termios.tcsetattr(in_fd, termios.TCSANOW, old)
if line:
return line.rstrip('\n')
raise EOFError
    line = array.array('c') # TODO(pts): Use a byte array.
while True:
# Do a blocking read on purpose, so other tasklets are suspended until
# the user finishes typing the command.
try:
c = os.read(in_fd, 1) # Don't read past the first '\n'.
except OSError, e:
if e.errno != errno.EAGAIN:
raise
select.select([in_fd], (), ())
continue
if not c:
if line:
return line.tostring() # Without the terminating '\n'.
else:
raise EOFError
if c in ('\r', '\n'):
return line.tostring()
line.append(c)
if os.isatty(in_fd):
return TerminalReadLine
else:
xout = coio.fdopen(out_fd, 'w', do_close=False)
return NonTerminalReadLine
class _Ticker(object):
"""Background tasklet demonstration for syncless.console.
To start the tasklet, type this to syncless.console: +ticker
To stop the tasklet, type this: -ticker
"""
ticker_worker = None
@classmethod
def TickerWorker(cls, sleep_amount):
while True:
os.write(1, '.')
coio.sleep(sleep_amount)
def __pos__(self):
if self.ticker_worker is None:
self.ticker_worker = coio.stackless.tasklet(self.TickerWorker)(0.1)
def __neg__(self):
if self.ticker_worker is not None:
self.ticker_worker.remove()
self.ticker_worker.kill()
self.ticker_worker = None
console_tasklet = None
def wrap_tasklet(function):
"""Create tasklet like stackless.tasklet(function), handle exceptions."""
import traceback
def TaskletWrapper(*args, **kwargs):
try:
function(*args, **kwargs)
except TaskletExit:
pass
except:
newlines = '\n\n'
if just_after_prompt:
newlines = '\n'
BlockingWriteAll(
2, '\n%sException terminated tasklet, resuming syncless.console.%s'
% (''.join(traceback.format_exc()), newlines))
if just_after_prompt: # Display the prompt again.
out_fd, prompt = just_after_prompt
BlockingWriteAll(out_fd, prompt)
coio.insert_after_current(console_tasklet)
return coio.stackless.tasklet(TaskletWrapper)
# Create a class just to display its name.
class SynclessInteractiveConsole(code.InteractiveConsole):
pass
SYNCLESS_CONSOLE_HELP = (
'See more on http://code.google.com/p/syncless/wiki/Console\n'
'Example commands on syncless.console:\n'
'+ticker\n'
'-ticker\n'
'wsgi.simple(8080, lambda *args: [\'Hello, <b>World!</b>\']) or None\n'
'wrap_tasklet(lambda: 1 / 0)()')
class _Helper(site._Helper):
def __repr__(self):
return ('%s\n%s') % (site._Helper.__repr__(self), SYNCLESS_CONSOLE_HELP)
console_module = type(code)('__console__')
# Initialize __builtins__ etc.
exec '' in console_module.__dict__
# TODO(pts): Add functionality to suspend all other tasklets temporarily.
console_module.coio = coio
console_module.patch = patch
console_module.wsgi = wsgi
console_module.stackless = coio.stackless
console_module.help = _Helper()
console_module.ticker = _Ticker()
console_module.wrap_tasklet = wrap_tasklet
sys.modules['__console__'] = console_module
def main(argv=None):
console = SynclessInteractiveConsole(console_module.__dict__)
if os.isatty(0):
try:
import readline
except ImportError:
pass
console.raw_input = NewReadLine(0, 1)
global console_tasklet
console_tasklet = coio.stackless.current
try:
console.interact(None)
finally:
console_tasklet = None
if __name__ == '__main__':
sys.exit(main(sys.argv) or 0)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard server handler logic.
TensorboardHandler contains all the logic for serving static files off of disk
and for handling the API calls to endpoints like /tags that require information
about loaded events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import functools
import imghdr
import mimetypes
import os
from six import StringIO
from six.moves import BaseHTTPServer
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves.urllib import parse as urlparse
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.tensorboard.backend import process_graph
from tensorflow.tensorboard.lib.python import http
DATA_PREFIX = '/data'
LOGDIR_ROUTE = '/logdir'
RUNS_ROUTE = '/runs'
PLUGIN_PREFIX = '/plugin'
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
AUDIO_ROUTE = '/' + event_accumulator.AUDIO
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
INDIVIDUAL_AUDIO_ROUTE = '/individualAudio'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA
TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']
_IMGHDR_TO_MIMETYPE = {
'bmp': 'image/bmp',
'gif': 'image/gif',
'jpeg': 'image/jpeg',
'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
image_type = imghdr.what(None, encoded_image_string)
return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
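# Hedged sketch (editor's illustration, not part of TensorBoard): the detection
# above only inspects the magic bytes at the start of the encoded image string,
# so a bare PNG signature is enough to yield 'image/png', and unrecognized bytes
# fall back to the generic octet-stream type. The byte strings are made up.
def _content_type_for_image_example():  # pragma: no cover
  png_signature = b'\x89PNG\r\n\x1a\n'
  assert _content_type_for_image(png_signature) == 'image/png'
  assert _content_type_for_image(b'not an image') == _DEFAULT_IMAGE_MIMETYPE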
class _OutputFormat(object):
"""An enum used to list the valid output formats for API calls.
Not all API calls support all formats (for example, only scalars and
compressed histograms support CSV).
"""
JSON = 'json'
CSV = 'csv'
class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler class for use with BaseHTTPServer.HTTPServer.
This is essentially a thin wrapper around calls to an EventMultiplexer object
as well as serving files off disk.
"""
# How many samples to include in sampling API calls by default.
DEFAULT_SAMPLE_COUNT = 10
# NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all
# responses using send_header.
protocol_version = 'HTTP/1.1'
def __init__(self, multiplexer, name_to_plugin_dict, logdir, *args):
self._multiplexer = multiplexer
self._registered_plugins = name_to_plugin_dict
self._logdir = logdir
self._setup_data_handlers()
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)
def _setup_data_handlers(self):
self.data_handlers = {
DATA_PREFIX + LOGDIR_ROUTE: self._serve_logdir,
DATA_PREFIX + SCALARS_ROUTE: self._serve_scalars,
DATA_PREFIX + GRAPH_ROUTE: self._serve_graph,
DATA_PREFIX + RUN_METADATA_ROUTE: self._serve_run_metadata,
DATA_PREFIX + HISTOGRAMS_ROUTE: self._serve_histograms,
DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:
self._serve_compressed_histograms,
DATA_PREFIX + IMAGES_ROUTE: self._serve_images,
DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE: self._serve_image,
DATA_PREFIX + AUDIO_ROUTE: self._serve_audio,
DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE: self._serve_individual_audio,
DATA_PREFIX + RUNS_ROUTE: self._serve_runs,
'/app.js': self._serve_js
}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
for name in self._registered_plugins:
try:
plugin = self._registered_plugins[name]
plugin_handlers = plugin.get_plugin_handlers(
self._multiplexer.RunPaths(), self._logdir)
except Exception as e: # pylint: disable=broad-except
logging.warning('Plugin %s failed. Exception: %s', name, str(e))
continue
for route, handler in plugin_handlers.items():
path = DATA_PREFIX + PLUGIN_PREFIX + '/' + name + route
self.data_handlers[path] = functools.partial(handler, self)
def respond(self, *args, **kwargs):
"""Delegates to http.Respond."""
http.Respond(self, *args, **kwargs)
# We use underscore_names for consistency with inherited methods.
def _image_response_for_run(self, run_images, run, tag):
"""Builds a JSON-serializable object with information about run_images.
Args:
run_images: A list of event_accumulator.ImageValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, width, and
height for each image.
"""
response = []
for index, run_image in enumerate(run_images):
response.append({
'wall_time': run_image.wall_time,
'step': run_image.step,
# We include the size so that the frontend can add that to the <img>
# tag so that the page layout doesn't change when the image loads.
'width': run_image.width,
'height': run_image.height,
'query': self._query_for_individual_image(run, tag, index)
})
return response
def _audio_response_for_run(self, run_audio, run, tag):
"""Builds a JSON-serializable object with information about run_audio.
Args:
run_audio: A list of event_accumulator.AudioValueEvent objects.
run: The name of the run.
      tag: The name of the tag the audio clips all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, and
content_type for each audio clip.
"""
response = []
for index, run_audio_clip in enumerate(run_audio):
response.append({
'wall_time': run_audio_clip.wall_time,
'step': run_audio_clip.step,
'content_type': run_audio_clip.content_type,
'query': self._query_for_individual_audio(run, tag, index)
})
return response
def _path_is_safe(self, path):
"""Check path is safe (stays within current directory).
This is for preventing directory-traversal attacks.
Args:
path: The path to check for safety.
Returns:
      True if the given path stays within the current directory, and False
      if it would escape to a higher directory. E.g. _path_is_safe('index.html')
      returns True, but _path_is_safe('../../../etc/password') returns False.
"""
base = os.path.abspath(os.curdir)
absolute_path = os.path.abspath(path)
prefix = os.path.commonprefix([base, absolute_path])
return prefix == base
def _serve_logdir(self, unused_query_params):
"""Writes out the logdir argument with which this tensorboard was started.
"""
self.respond({'logdir': self._logdir}, 'application/json')
def _serve_scalars(self, query_params):
"""Given a tag and single run, return array of ScalarEvents.
Alternately, if both the tag and the run are omitted, returns JSON object
where obj[run][tag] contains sample values for the given tag in the given
run.
Args:
query_params: The query parameters as a dict.
"""
# TODO(cassandrax): return HTTP status code for malformed requests
tag = query_params.get('tag')
run = query_params.get('run')
if tag is None and run is None:
if query_params.get('format') == _OutputFormat.CSV:
self.respond('Scalar sample values only supports JSON output',
'text/plain', 400)
return
sample_count = int(query_params.get('sample_count',
self.DEFAULT_SAMPLE_COUNT))
values = {}
for run_name, tags in self._multiplexer.Runs().items():
values[run_name] = {
tag: _uniform_sample(
self._multiplexer.Scalars(run_name, tag), sample_count)
for tag in tags['scalars']
}
else:
values = self._multiplexer.Scalars(run, tag)
if query_params.get('format') == _OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
self.respond(string_io.getvalue(), 'text/csv')
else:
self.respond(values, 'application/json')
def _serve_graph(self, query_params):
"""Given a single run, return the graph definition in json format."""
run = query_params.get('run', None)
if run is None:
self.respond('query parameter "run" is required', 'text/plain', 400)
return
try:
graph = self._multiplexer.Graph(run)
except ValueError:
self.send_response(404)
return
limit_attr_size = query_params.get('limit_attr_size', None)
if limit_attr_size is not None:
try:
limit_attr_size = int(limit_attr_size)
except ValueError:
self.respond('query parameter `limit_attr_size` must be integer',
'text/plain', 400)
return
large_attrs_key = query_params.get('large_attrs_key', None)
try:
process_graph.prepare_graph_for_ui(graph, limit_attr_size,
large_attrs_key)
except ValueError as e:
self.respond(e.message, 'text/plain', 400)
return
self.respond(str(graph), 'text/x-protobuf') # pbtxt
def _serve_run_metadata(self, query_params):
"""Given a tag and a TensorFlow run, return the session.run() metadata."""
tag = query_params.get('tag', None)
run = query_params.get('run', None)
if tag is None:
self.respond('query parameter "tag" is required', 'text/plain', 400)
return
if run is None:
self.respond('query parameter "run" is required', 'text/plain', 400)
return
try:
run_metadata = self._multiplexer.RunMetadata(run, tag)
except ValueError:
self.send_response(404)
return
self.respond(str(run_metadata), 'text/x-protobuf') # pbtxt
def _serve_histograms(self, query_params):
"""Given a tag and single run, return an array of histogram values."""
tag = query_params.get('tag')
run = query_params.get('run')
values = self._multiplexer.Histograms(run, tag)
self.respond(values, 'application/json')
def _serve_compressed_histograms(self, query_params):
"""Given a tag and single run, return an array of compressed histograms."""
tag = query_params.get('tag')
run = query_params.get('run')
compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
if query_params.get('format') == _OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
# Build the headers; we have two columns for timing and two columns for
# each compressed histogram bucket.
headers = ['Wall time', 'Step']
if compressed_histograms:
bucket_count = len(compressed_histograms[0].compressed_histogram_values)
for i in xrange(bucket_count):
headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
writer.writerow(headers)
for compressed_histogram in compressed_histograms:
row = [compressed_histogram.wall_time, compressed_histogram.step]
for value in compressed_histogram.compressed_histogram_values:
row += [value.rank_in_bps, value.value]
writer.writerow(row)
self.respond(string_io.getvalue(), 'text/csv')
else:
self.respond(compressed_histograms, 'application/json')
def _serve_images(self, query_params):
"""Given a tag and list of runs, serve a list of images.
Note that the images themselves are not sent; instead, we respond with URLs
to the images. The frontend should treat these URLs as opaque and should not
try to parse information about them or generate them itself, as the format
may change.
Args:
query_params: The query parameters as a dict.
"""
tag = query_params.get('tag')
run = query_params.get('run')
images = self._multiplexer.Images(run, tag)
response = self._image_response_for_run(images, run, tag)
self.respond(response, 'application/json')
def _serve_image(self, query_params):
"""Serves an individual image."""
tag = query_params.get('tag')
run = query_params.get('run')
index = int(query_params.get('index'))
image = self._multiplexer.Images(run, tag)[index]
encoded_image_string = image.encoded_image_string
content_type = _content_type_for_image(encoded_image_string)
self.respond(encoded_image_string, content_type)
def _query_for_individual_image(self, run, tag, index):
"""Builds a URL for accessing the specified image.
This should be kept in sync with _serve_image. Note that the URL is *not*
guaranteed to always return the same image, since images may be unloaded
from the reservoir as new images come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the image. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled image in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
def _serve_audio(self, query_params):
"""Given a tag and list of runs, serve a list of audio.
Note that the audio clips themselves are not sent; instead, we respond with
URLs to the audio. The frontend should treat these URLs as opaque and should
not try to parse information about them or generate them itself, as the
format may change.
Args:
query_params: The query parameters as a dict.
"""
tag = query_params.get('tag')
run = query_params.get('run')
audio_list = self._multiplexer.Audio(run, tag)
response = self._audio_response_for_run(audio_list, run, tag)
self.respond(response, 'application/json')
def _serve_individual_audio(self, query_params):
"""Serves an individual audio clip."""
tag = query_params.get('tag')
run = query_params.get('run')
index = int(query_params.get('index'))
audio = self._multiplexer.Audio(run, tag)[index]
self.respond(audio.encoded_audio_string, audio.content_type)
def _query_for_individual_audio(self, run, tag, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_individual_audio. Note that the URL
is *not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio comes in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled audio in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
def _serve_runs(self, unused_query_params):
"""Return a JSON object about runs and tags.
Returns a mapping from runs to tagType to list of tags for that run.
Returns:
{runName: {images: [tag1, tag2, tag3],
audio: [tag4, tag5, tag6],
scalars: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
firstEventTimestamp: 123456.789}}
"""
runs = self._multiplexer.Runs()
for run_name, run_data in runs.items():
try:
run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(
run_name)
except ValueError:
logging.warning('Unable to get first event timestamp for run %s',
run_name)
run_data['firstEventTimestamp'] = None
self.respond(runs, 'application/json')
def _serve_index(self, unused_query_params):
"""Serves the index page (i.e., the tensorboard app itself)."""
self._serve_static_file('/dist/index.html')
def _serve_js(self, unused_query_params):
"""Serves the JavaScript for the index page."""
self._serve_static_file('/dist/app.js')
def _serve_static_file(self, path):
"""Serves the static file located at the given path.
Args:
path: The path of the static file, relative to the tensorboard/ directory.
"""
# Strip off the leading forward slash.
orig_path = path.lstrip('/')
if not self._path_is_safe(orig_path):
logging.warning('path not safe: %s', orig_path)
self.respond('Naughty naughty!', 'text/plain', 400)
return
# Resource loader wants a path relative to //WORKSPACE/tensorflow.
path = os.path.join('tensorboard', orig_path)
# Open the file and read it.
try:
contents = resource_loader.load_resource(path)
except IOError:
      # For compatibility with the latest version of Bazel, we renamed bower
      # packages to use '_' rather than '-' in their package names.
      # This means that the directory structure has changed too.
# So that all our recursive imports work, we need to modify incoming
# requests to map onto the new directory structure.
path = orig_path
components = path.split('/')
components[0] = components[0].replace('-', '_')
path = ('/').join(components)
# Bazel keeps all the external dependencies in //WORKSPACE/external.
# and resource loader wants a path relative to //WORKSPACE/tensorflow/.
path = os.path.join('../external', path)
try:
contents = resource_loader.load_resource(path)
except IOError:
logging.info('path %s not found, sending 404', path)
self.respond('Not found', 'text/plain', 404)
return
mimetype, content_encoding = mimetypes.guess_type(path)
mimetype = mimetype or 'application/octet-stream'
self.respond(contents, mimetype, expires=3600,
content_encoding=content_encoding)
def do_GET(self): # pylint: disable=invalid-name
"""Handler for all get requests."""
parsed_url = urlparse.urlparse(self.path)
# Remove a trailing slash, if present.
clean_path = parsed_url.path
if clean_path.endswith('/'):
clean_path = clean_path[:-1]
query_params = urlparse.parse_qs(parsed_url.query)
# parse_qs returns a list of values for each key; we're only interested in
# the first.
for key in query_params:
value_count = len(query_params[key])
if value_count != 1:
self.respond(
'query parameter %s should have exactly one value, had %d' %
(key, value_count), 'text/plain', 400)
return
query_params[key] = query_params[key][0]
if clean_path in self.data_handlers:
self.data_handlers[clean_path](query_params)
elif clean_path in TAB_ROUTES:
self._serve_index(query_params)
else:
self._serve_static_file(clean_path)
# @Override
def log_message(self, *args):
"""Logs message."""
# By default, BaseHTTPRequestHandler logs to stderr.
logging.info(*args)
# @Override
def log_request(self, *args):
"""Does nothing."""
# This is called by BaseHTTPRequestHandler.send_response() which causes it
# to log every request. We've configured http.Respond() to only log
# requests with >=400 status code.
pass
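# Hedged sketch (editor's illustration, not part of TensorBoard): how
# _setup_data_handlers composes the route for a registered plugin, as described
# in the comment inside that method. The plugin name and route are made up.
def _plugin_route_example():  # pragma: no cover
  name, route = 'projector', '/runs'
  path = DATA_PREFIX + PLUGIN_PREFIX + '/' + name + route
  assert path == '/data/plugin/projector/runs'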
def _uniform_sample(values, count):
"""Samples `count` values uniformly from `values`.
Args:
values: The values to sample from.
count: The number of values to sample. Must be at least 2.
Raises:
ValueError: If `count` is not at least 2.
TypeError: If `type(count) != int`.
Returns:
A list of values from `values`. The first and the last element will always
be included. If `count > len(values)`, then all values will be returned.
"""
if count < 2:
raise ValueError('Must sample at least 2 elements, %d requested' % count)
if count >= len(values):
# Copy the list in case the caller mutates it.
return list(values)
return [
# We divide by count - 1 to make sure we always get the first and the last
# element.
values[(len(values) - 1) * i // (count - 1)] for i in xrange(count)
]
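# Hedged sketch (editor's illustration, not part of TensorBoard): with ten values
# and count=4 the index formula (len(values) - 1) * i // (count - 1) picks indices
# 0, 3, 6 and 9, so the first and last values are always included; when count
# exceeds len(values) the whole list is copied. The values below are made up.
def _uniform_sample_example():  # pragma: no cover
  values = list(range(10))
  assert _uniform_sample(values, 4) == [0, 3, 6, 9]
  assert _uniform_sample(values, 100) == values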
|
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import base64
import time
from boto.compat import six, urllib
from boto.auth import detect_potential_s3sigv4
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import BotoClientError, S3ResponseError
from boto.utils import get_utf8able_str
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
    Note this also covers names with no letters at all (e.g. numeric bucket
    names with dashes), which would otherwise fail islower().
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
True
"""
if not (n + 'a').islower():
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return True
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
pass
return f(*args, **kwargs)
return wrapper
class _CallingFormat(object):
def get_bucket_server(self, server, bucket):
return ''
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '%s://' % six.ensure_text(protocol)
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=u''):
bucket = six.ensure_text(bucket, encoding='utf-8')
key = get_utf8able_str(key)
path = u''
if bucket != u'':
path = u'/' + bucket
return path + '/%s' % urllib.parse.quote(key)
def build_path_base(self, bucket, key=''):
key = get_utf8able_str(key)
return '/%s' % urllib.parse.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
key = get_utf8able_str(key)
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.parse.quote(key)
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
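# Hedged sketch (editor's illustration, not part of boto): how the calling formats
# above differ when composing the request host and path for the same bucket/key
# pair. The endpoint, bucket and key names are made up for the example.
def _calling_format_examples():  # pragma: no cover
    subdomain = SubdomainCallingFormat()
    ordinary = OrdinaryCallingFormat()
    # Sub-domain style puts the bucket into the host name...
    assert (subdomain.build_host('s3.amazonaws.com', 'mybucket') ==
            'mybucket.s3.amazonaws.com')
    # ...while the ordinary style keeps the host fixed and prefixes the path.
    assert ordinary.build_host('s3.amazonaws.com', 'mybucket') == 's3.amazonaws.com'
    assert (ordinary.build_path_base('mybucket', 'photos/cat.jpg') ==
            '/mybucket/photos/cat.jpg')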
class Location(object):
DEFAULT = '' # US Classic Region
EU = 'EU' # Ireland
EUCentral1 = 'eu-central-1' # Frankfurt
USWest = 'us-west-1'
USWest2 = 'us-west-2'
SAEast = 'sa-east-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
APSoutheast2 = 'ap-southeast-2'
CNNorth1 = 'cn-north-1'
class NoHostProvided(object):
# An identifying object to help determine whether the user provided a
# ``host`` or not. Never instantiated.
pass
class HostRequiredError(BotoClientError):
pass
class S3Connection(AWSAuthConnection):
DefaultHost = 's3.amazonaws.com'
DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=NoHostProvided, debug=0, https_connection_factory=None,
calling_format=DefaultCallingFormat, path='/',
provider='aws', bucket_class=Bucket, security_token=None,
suppress_consec_slashes=True, anon=None,
validate_certs=None, profile_name=None):
self.bucket_class = bucket_class
if isinstance(calling_format, six.string_types):
calling_format=boto.utils.find_class(calling_format)()
self.calling_format = calling_format
# Fetching config options at init time, instead of using a class-level
# default (set at class declaration time) as the default arg value,
# allows our tests to ensure that the config file options are
# respected.
if anon is None:
# Only fetch from the config option if a non-default arg value was
# provided.
anon = boto.config.getbool('s3', 'no_sign_request', False)
self.anon = anon
no_host_provided = False
if host is NoHostProvided:
host = boto.config.get('s3', 'host')
if host is None:
host = self.DefaultHost
no_host_provided = True
super(S3Connection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path, provider=provider, security_token=security_token,
suppress_consec_slashes=suppress_consec_slashes,
validate_certs=validate_certs, profile_name=profile_name)
# We need to delay until after the call to ``super`` before checking
# to see if SigV4 is in use.
if no_host_provided:
if 'hmac-v4-s3' in self._required_auth_capability():
raise HostRequiredError(
"When using SigV4, you must specify a 'host' parameter."
)
@detect_potential_s3sigv4
def _required_auth_capability(self):
if self.anon:
return ['anon']
else:
return ['s3']
def __iter__(self):
for bucket in self.get_all_buckets():
yield bucket
def __contains__(self, bucket_name):
return not (self.lookup(bucket_name) is None)
def set_bucket_class(self, bucket_class):
"""
        Set the Bucket class associated with this connection. By default,
        this is the boto.s3.bucket.Bucket class, but if you want to subclass
        that for some reason this allows you to associate your new class
        with the connection.
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
self.bucket_class = bucket_class
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
assert isinstance(expiration_time, time.struct_time), \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in=6000,
acl=None, success_action_redirect=None,
max_content_length=None,
http_method='http', fields=None,
conditions=None, storage_class='STANDARD',
server_side_encryption=None):
"""
Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the
        actual form. It does not return the file input field, which also
        needs to be added.
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: string
:param acl: A canned ACL. One of:
* private
* public-read
* public-read-write
* authenticated-read
* bucket-owner-read
* bucket-owner-full-control
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:type storage_class: string
:param storage_class: Storage class to use for storing the object.
Valid values: STANDARD | REDUCED_REDUNDANCY
:type server_side_encryption: string
:param server_side_encryption: Specifies server-side encryption
algorithm to use when Amazon S3 creates an object.
Valid values: None | AES256
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
"""
if fields is None:
fields = []
if conditions is None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({"name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({"name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
if self.provider.security_token:
fields.append({'name': 'x-amz-security-token',
'value': self.provider.security_token})
conditions.append('{"x-amz-security-token": "%s"}' % self.provider.security_token)
if storage_class:
fields.append({'name': 'x-amz-storage-class',
'value': storage_class})
conditions.append('{"x-amz-storage-class": "%s"}' % storage_class)
if server_side_encryption:
fields.append({'name': 'x-amz-server-side-encryption',
'value': server_side_encryption})
conditions.append('{"x-amz-server-side-encryption": "%s"}' % server_side_encryption)
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
# Add signature for encoded policy document as the
# 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields}
def generate_url_sigv4(self, expires_in, method, bucket='', key='',
headers=None, force_http=False,
response_headers=None, version_id=None,
iso_date=None):
path = self.calling_format.build_path_base(bucket, key)
auth_path = self.calling_format.build_auth_path(bucket, key)
host = self.calling_format.build_host(self.server_name(), bucket)
# For presigned URLs we should ignore the port if it's HTTPS
if host.endswith(':443'):
host = host[:-4]
params = {}
if version_id is not None:
params['VersionId'] = version_id
if response_headers is not None:
params.update(response_headers)
http_request = self.build_base_http_request(method, path, auth_path,
headers=headers, host=host,
params=params)
return self._auth_handler.presign(http_request, expires_in,
iso_date=iso_date)
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None):
if self._auth_handler.capability[0] == 'hmac-v4-s3' and query_auth:
# Handle the special sigv4 case
return self.generate_url_sigv4(expires_in, method, bucket=bucket,
key=key, headers=headers, force_http=force_http,
response_headers=response_headers, version_id=version_id)
headers = headers or {}
if expires_in_absolute:
expires = int(expires_in)
else:
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
# optional version_id and response_headers need to be added to
# the query param list.
extra_qp = []
if version_id is not None:
extra_qp.append("versionId=%s" % version_id)
if response_headers:
for k, v in response_headers.items():
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if self.provider.security_token:
headers['x-amz-security-token'] = self.provider.security_token
if extra_qp:
delimiter = '?' if '?' not in auth_path else '&'
auth_path += delimiter + '&'.join(extra_qp)
self.calling_format.build_path_base(bucket, key)
if query_auth and not self.anon:
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
encoded_canonical = urllib.parse.quote(b64_hmac, safe='')
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
else:
query_part = ''
if headers:
hdr_prefix = self.provider.header_prefix
for k, v in headers.items():
if k.startswith(hdr_prefix):
# headers used for sig generation must be
# included in the url also.
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if extra_qp:
delimiter = '?' if not query_part else '&'
query_part += delimiter + '&'.join(extra_qp)
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
return self.calling_format.build_url_base(self, protocol,
self.server_name(port),
bucket, key) + query_part
def get_all_buckets(self, headers=None):
response = self.make_request('GET', headers=headers)
body = response.read()
if response.status > 300:
raise self.provider.storage_response_error(
response.status, response.reason, body)
rs = ResultSet([('Bucket', self.bucket_class)])
h = handler.XmlHandler(rs, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self, headers=None):
"""
Convenience method that returns the "CanonicalUserID" of the
        user whose credentials are associated with the connection.
The only way to get this value is to do a GET request on the
service which returns all buckets associated with the account.
As part of that response, the canonical userid is returned.
This method simply does all of that and then returns just the
user id.
:rtype: string
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
return rs.owner.id
def get_bucket(self, bucket_name, validate=True, headers=None):
"""
Retrieves a bucket by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised. If
you are unsure if the bucket exists or not, you can use the
``S3Connection.lookup`` method, which will either return a valid bucket
or ``None``.
If ``validate=False`` is passed, no request is made to the service (no
charge/communication delay). This is only safe to do if you are **sure**
the bucket exists.
If the default ``validate=True`` is passed, a request is made to the
service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched
a list of keys (but with a max limit set to ``0``, always returning an empty
        list) in the bucket (and included better error messages), at an
increased expense. As of Boto v2.25.0, this now performs a HEAD request
(less expensive but worse error messages).
If you were relying on parsing the error message before, you should call
something like::
bucket = conn.get_bucket('<bucket_name>', validate=False)
bucket.get_all_keys(maxkeys=0)
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
:param validate: If ``True``, it will try to verify the bucket exists
on the service-side. (Default: ``True``)
"""
if validate:
return self.head_bucket(bucket_name, headers=headers)
else:
return self.bucket_class(self, bucket_name)
def head_bucket(self, bucket_name, headers=None):
"""
Determines if a bucket exists by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:returns: A <Bucket> object
"""
response = self.make_request('HEAD', bucket_name, headers=headers)
body = response.read()
if response.status == 200:
return self.bucket_class(self, bucket_name)
elif response.status == 403:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'AccessDenied'
err.error_message = 'Access Denied'
raise err
elif response.status == 404:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'NoSuchBucket'
err.error_message = 'The specified bucket does not exist'
raise err
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def lookup(self, bucket_name, validate=True, headers=None):
"""
Attempts to get a bucket from S3.
        Works identically to ``S3Connection.get_bucket``, except that it
        will return ``None`` if the bucket does not exist instead of throwing
an exception.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
        :param validate: If ``True``, it will try to verify the bucket exists
            on the service-side, just as ``get_bucket`` does. (Default: ``True``)
"""
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
except:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
"""
        Creates a new bucket in the given location. By default the bucket is
        created in the US Classic region. You can pass Location.EU to create
        a European bucket (S3) or European Union bucket (GCS).
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: str
:param location: The location of the new bucket. You can use one of the
constants in :class:`boto.s3.connection.Location` (e.g. Location.EU,
Location.USWest, etc.).
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header: policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConfiguration><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConfiguration>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def delete_bucket(self, bucket, headers=None):
"""
Removes an S3 bucket.
In order to remove the bucket, it must first be empty. If the bucket is
not empty, an ``S3ResponseError`` will be raised.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
"""
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None, override_num_retries=None,
retry_handler=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
boto.log.debug('path=%s' % path)
auth_path = self.calling_format.build_auth_path(bucket, key)
boto.log.debug('auth_path=%s' % auth_path)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
return super(S3Connection, self).make_request(
method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries,
retry_handler=retry_handler
)
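# Hedged usage sketch (editor's illustration, not executed here): the typical flow
# is to build a connection, look up or create a bucket, and then use helpers such
# as generate_url() and build_post_form_args(). Credentials, bucket and key names
# below are placeholders, not real values.
def _s3_connection_usage_sketch():  # pragma: no cover
    conn = S3Connection('<access-key>', '<secret-key>',
                        calling_format=OrdinaryCallingFormat())
    bucket = conn.lookup('my-example-bucket')  # None if the bucket does not exist
    if bucket is None:
        bucket = conn.create_bucket('my-example-bucket', location=Location.EU)
    # Pre-signed GET URL, valid for one hour.
    url = conn.generate_url(3600, 'GET', bucket='my-example-bucket',
                            key='photos/cat.jpg')
    # Fields for a browser-based POST upload form (file input must be added).
    form = conn.build_post_form_args('my-example-bucket', 'uploads/${filename}',
                                     expires_in=600, acl='public-read',
                                     max_content_length=10 * 1024 * 1024)
    return bucket, url, form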
|
|
# Copyright 2014, 2015 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
###
from pyhdb.protocol.message import RequestMessage
from pyhdb.protocol.segments import RequestSegment
from pyhdb.protocol.types import escape_values, by_type_code
from pyhdb.protocol.parts import Command, FetchSize, ResultSetId, StatementId, Parameters, WriteLobRequest
from pyhdb.protocol.constants import message_types, function_codes, part_kinds
from pyhdb.exceptions import ProgrammingError, InterfaceError, DatabaseError
from pyhdb.compat import izip
FORMAT_OPERATION_ERRORS = [
'not enough arguments for format string',
'not all arguments converted during string formatting'
]
def format_operation(operation, parameters=None):
if parameters is not None:
e_values = escape_values(parameters)
try:
operation = operation % e_values
except TypeError as msg:
if str(msg) in FORMAT_OPERATION_ERRORS:
# Python DBAPI expects a ProgrammingError in this case
raise ProgrammingError(str(msg))
else:
# some other error message appeared, so just reraise exception:
raise
return operation
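# Hedged sketch (editor's illustration, not part of pyhdb): when the number of
# placeholders and the number of parameters disagree, Python's own string
# formatting raises a TypeError whose message is mapped to the DBAPI
# ProgrammingError above. The statement and values are made up.
def _format_operation_example():  # pragma: no cover
    try:
        format_operation("INSERT INTO test VALUES (%s, %s)", ('only-one',))
    except ProgrammingError:
        pass  # expected: 'not enough arguments for format string'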
class PreparedStatement(object):
"""Reference object to a prepared statement including parameter (meta) data"""
ParamTuple = collections.namedtuple('Parameter', 'id type_code length value')
def __init__(self, connection, statement_id, params_metadata, result_metadata_part):
"""Initialize PreparedStatement part object
:param connection: connection object
:param statement_id: 8-byte statement identifier
:param params_metadata: A tuple of named-tuple instances containing parameter meta data:
Example: (ParameterMetadata(options=2, datatype=26, mode=1, id=0, length=24, fraction=0),)
:param result_metadata_part: can be None
"""
self._connection = connection
self.statement_id = statement_id
self._params_metadata = params_metadata
self.result_metadata_part = result_metadata_part
self._multi_row_parameters = None
self._num_rows = None
self._iter_row_count = None
def prepare_parameters(self, multi_row_parameters):
""" Attribute sql parameters with meta data for a prepared statement.
Make some basic checks that at least the number of parameters is correct.
:param multi_row_parameters: A list/tuple containing list/tuples of parameters (for multiple rows)
:returns: A generator producing parameters attributed with meta data for one sql statement (a row) at a time
"""
self._multi_row_parameters = multi_row_parameters
self._num_rows = len(multi_row_parameters)
self._iter_row_count = 0
return self
def __repr__(self):
return '<PreparedStatement id=%r>' % self.statement_id
def __iter__(self):
return self
def __bool__(self):
return self._iter_row_count < self._num_rows
# Python 2.7 compat
__nonzero__ = __bool__
def __next__(self):
if self._iter_row_count == self._num_rows:
raise StopIteration()
parameters = self._multi_row_parameters[self._iter_row_count]
if not isinstance(parameters, (list, tuple, dict)):
raise ProgrammingError("Prepared statement parameters supplied as %s, shall be list, tuple or dict." %
type(parameters).__name__)
if len(parameters) != len(self._params_metadata):
raise ProgrammingError("Prepared statement parameters expected %d supplied %d." %
(len(self._params_metadata), len(parameters)))
row_params = [self.ParamTuple(p.id, p.datatype, p.length, parameters[i]) for i, p in enumerate(self._params_metadata)]
self._iter_row_count += 1
return row_params
# Python 2.7 compat
next = __next__
def back(self):
assert self._iter_row_count > 0, 'already stepped back to beginning of iterator data'
self._iter_row_count -= 1
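# Hedged sketch (editor's illustration, not part of pyhdb): prepare_parameters()
# returns the statement itself as an iterator of per-row parameter lists, pairing
# each supplied value with the id/datatype/length from the parameter metadata.
# The metadata namedtuple and values below are made up for illustration.
def _prepare_parameters_example():  # pragma: no cover
    FakeMetadata = collections.namedtuple('ParameterMetadata', 'id datatype length')
    metadata = (FakeMetadata(id=0, datatype=26, length=24),)
    statement = PreparedStatement(None, b'\x00' * 8, metadata, None)
    rows = list(statement.prepare_parameters([('alice',), ('bob',)]))
    assert rows[0][0].value == 'alice' and rows[1][0].value == 'bob'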
class Cursor(object):
"""Database cursor class"""
def __init__(self, connection):
self.connection = connection
self._buffer = iter([])
self._received_last_resultset_part = False
self._executed = None
self.rowcount = -1
self._column_types = None
self.description = None
self.rownumber = None
self.arraysize = 1
self._prepared_statements = {}
@property
def prepared_statement_ids(self):
return self._prepared_statements.keys()
def get_prepared_statement(self, statement_id):
return self._prepared_statements[statement_id]
def prepare(self, statement):
"""Prepare SQL statement in HANA and cache it
:param statement; a valid SQL statement
:returns: statement_id (of prepared and cached statement)
"""
self._check_closed()
self._column_types = None
statement_id = params_metadata = result_metadata_part = None
request = RequestMessage.new(
self.connection,
RequestSegment(
message_types.PREPARE,
Command(statement)
)
)
response = self.connection.send_request(request)
for part in response.segments[0].parts:
if part.kind == part_kinds.STATEMENTID:
statement_id = part.statement_id
elif part.kind == part_kinds.PARAMETERMETADATA:
params_metadata = part.values
elif part.kind == part_kinds.RESULTSETMETADATA:
result_metadata_part = part
# Check that both variables have been set in previous loop, we need them:
assert statement_id is not None
assert params_metadata is not None
# cache statement:
self._prepared_statements[statement_id] = PreparedStatement(self.connection, statement_id,
params_metadata, result_metadata_part)
return statement_id
def execute_prepared(self, prepared_statement, multi_row_parameters):
"""
:param prepared_statement: A PreparedStatement instance
:param multi_row_parameters: A list/tuple containing list/tuples of parameters (for multiple rows)
"""
self._check_closed()
# Convert parameters into a generator producing lists with parameters as named tuples (incl. some meta data):
parameters = prepared_statement.prepare_parameters(multi_row_parameters)
while parameters:
request = RequestMessage.new(
self.connection,
RequestSegment(
message_types.EXECUTE,
(StatementId(prepared_statement.statement_id),
Parameters(parameters))
)
)
reply = self.connection.send_request(request)
parts = reply.segments[0].parts
function_code = reply.segments[0].function_code
if function_code == function_codes.SELECT:
self._handle_select(parts, prepared_statement.result_metadata_part)
elif function_code in function_codes.DML:
self._handle_upsert(parts, request.segments[0].parts[1].unwritten_lobs)
elif function_code == function_codes.DDL:
# No additional handling is required
pass
elif function_code in (function_codes.DBPROCEDURECALL, function_codes.DBPROCEDURECALLWITHRESULT):
self._handle_dbproc_call(parts, prepared_statement._params_metadata) # resultset metadata set in prepare
else:
raise InterfaceError("Invalid or unsupported function code received: %d" % function_code)
def _execute_direct(self, operation):
"""Execute statements which are not going through 'prepare_statement' (aka 'direct execution').
        Either they have no parameters, or Python's string expansion has been applied to the SQL statement.
:param operation:
"""
request = RequestMessage.new(
self.connection,
RequestSegment(
message_types.EXECUTEDIRECT,
Command(operation)
)
)
reply = self.connection.send_request(request)
parts = reply.segments[0].parts
function_code = reply.segments[0].function_code
if function_code == function_codes.SELECT:
self._handle_select(parts)
elif function_code in function_codes.DML:
self._handle_upsert(parts)
elif function_code == function_codes.DDL:
# No additional handling is required
pass
elif function_code in (function_codes.DBPROCEDURECALL, function_codes.DBPROCEDURECALLWITHRESULT):
self._handle_dbproc_call(parts, None)
else:
raise InterfaceError("Invalid or unsupported function code received: %d" % function_code)
def execute(self, statement, parameters=None):
"""Execute statement on database
:param statement: a valid SQL statement
:param parameters: a list/tuple of parameters
:returns: this cursor
        In order to be compatible with Python's DBAPI, five parameter styles
        must be supported.
paramstyle Meaning
---------------------------------------------------------
1) qmark Question mark style, e.g. ...WHERE name=?
2) numeric Numeric, positional style, e.g. ...WHERE name=:1
3) named Named style, e.g. ...WHERE name=:name
4) format ANSI C printf format codes, e.g. ...WHERE name=%s
5) pyformat Python extended format codes, e.g. ...WHERE name=%(name)s
        Hana's 'prepare statement' feature supports 1) and 2), while 4) and 5)
        are handled by Python's own string expansion mechanism.
Note that case 3 is not yet supported by this method!
"""
self._check_closed()
if not parameters:
# Directly execute the statement, nothing else to prepare:
self._execute_direct(statement)
else:
self.executemany(statement, parameters=[parameters])
return self
def executemany(self, statement, parameters):
"""Execute statement on database with multiple rows to be inserted/updated
:param statement: a valid SQL statement
:param parameters: a nested list/tuple of parameters for multiple rows
:returns: this cursor
"""
# First try safer hana-style parameter expansion:
try:
statement_id = self.prepare(statement)
except DatabaseError as msg:
# Hana expansion failed, check message to be sure of reason:
if 'incorrect syntax near "%"' not in str(msg):
# Probably some other error than related to string expansion -> raise an error
raise
# Statement contained percentage char, so perform Python style parameter expansion:
for row_params in parameters:
operation = format_operation(statement, row_params)
self._execute_direct(operation)
else:
# Continue with Hana style statement execution:
prepared_statement = self.get_prepared_statement(statement_id)
self.execute_prepared(prepared_statement, parameters)
# Return cursor object:
return self
def _handle_upsert(self, parts, unwritten_lobs=()):
"""Handle reply messages from INSERT or UPDATE statements"""
self.description = None
self._received_last_resultset_part = True # set to 'True' so that cursor.fetch*() returns just empty list
for part in parts:
if part.kind == part_kinds.ROWSAFFECTED:
self.rowcount = part.values[0]
elif part.kind in (part_kinds.TRANSACTIONFLAGS, part_kinds.STATEMENTCONTEXT, part_kinds.PARAMETERMETADATA):
pass
elif part.kind == part_kinds.WRITELOBREPLY:
                # This part occurs when LOBs were submitted only partially (or not at all) during an insert.
# In this case the parameter part of the Request message contains a list called 'unwritten_lobs'
# with LobBuffer instances.
# Those instances are in the same order as 'locator_ids' received in the reply message. These IDs
# are then used to deliver the missing LOB data to the server via WRITE_LOB_REQUESTs.
for lob_buffer, lob_locator_id in izip(unwritten_lobs, part.locator_ids):
# store locator_id in every lob buffer instance for later reference:
lob_buffer.locator_id = lob_locator_id
self._perform_lob_write_requests(unwritten_lobs)
else:
raise InterfaceError("Prepared insert statement response, unexpected part kind %d." % part.kind)
self._executed = True
def _perform_lob_write_requests(self, unwritten_lobs):
"""After sending incomplete LOB data during an INSERT or UPDATE this method will be called.
It sends missing LOB data possibly in multiple LOBWRITE requests for all LOBs.
:param unwritten_lobs: A deque list of LobBuffer instances containing LOB data.
Those buffers have been assembled in the parts.Parameter.pack_lob_data() method.
"""
while unwritten_lobs:
request = RequestMessage.new(
self.connection,
RequestSegment(
message_types.WRITELOB,
WriteLobRequest(unwritten_lobs)
)
)
self.connection.send_request(request)
def _handle_select(self, parts, result_metadata=None):
"""Handle reply messages from SELECT statements"""
self.rowcount = -1
if result_metadata is not None:
# Select was prepared and we can use the already received metadata
self.description, self._column_types = self._handle_result_metadata(result_metadata)
for part in parts:
if part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
elif part.kind in (part_kinds.STATEMENTCONTEXT, part_kinds.TRANSACTIONFLAGS, part_kinds.PARAMETERMETADATA):
pass
else:
raise InterfaceError("Prepared select statement response, unexpected part kind %d." % part.kind)
def _handle_dbproc_call(self, parts, parameters_metadata):
"""Handle reply messages from STORED PROCEDURE statements"""
for part in parts:
if part.kind == part_kinds.ROWSAFFECTED:
self.rowcount = part.values[0]
elif part.kind == part_kinds.TRANSACTIONFLAGS:
pass
elif part.kind == part_kinds.STATEMENTCONTEXT:
pass
elif part.kind == part_kinds.OUTPUTPARAMETERS:
self._buffer = part.unpack_rows(parameters_metadata, self.connection)
self._received_last_resultset_part = True
self._executed = True
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
else:
raise InterfaceError("Stored procedure call, unexpected part kind %d." % part.kind)
self._executed = True
def _handle_result_metadata(self, result_metadata):
description = []
column_types = []
for column in result_metadata.columns:
description.append((column[8], column[1], None, column[3], column[2], None, column[0] & 0b10))
if column[1] not in by_type_code:
raise InterfaceError("Unknown column data type: %s" % column[1])
column_types.append(by_type_code[column[1]])
return tuple(description), tuple(column_types)
def fetchmany(self, size=None):
"""Fetch many rows from select result set.
:param size: Number of rows to return.
:returns: list of row records (tuples)
"""
self._check_closed()
if not self._executed:
raise ProgrammingError("Require execute() first")
if size is None:
size = self.arraysize
result = []
cnt = 0
while cnt != size:
try:
result.append(next(self._buffer))
cnt += 1
except StopIteration:
break
if cnt == size or self._received_last_resultset_part:
# No rows are missing or there are no additional rows
return result
request = RequestMessage.new(
self.connection,
RequestSegment(
message_types.FETCHNEXT,
(ResultSetId(self._resultset_id), FetchSize(size - cnt))
)
)
response = self.connection.send_request(request)
resultset_part = response.segments[0].parts[1]
if resultset_part.attribute & 1:
self._received_last_resultset_part = True
result.extend(resultset_part.unpack_rows(self._column_types, self.connection))
return result
def fetchone(self):
"""Fetch one row from select result set.
:returns: a single row tuple
"""
result = self.fetchmany(size=1)
if result:
return result[0]
return None
FETCHALL_BLOCKSIZE = 1024
def fetchall(self):
"""Fetch all available rows from select result set.
:returns: list of row tuples
"""
result = r = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
while len(r) == self.FETCHALL_BLOCKSIZE or not self._received_last_resultset_part:
r = self.fetchmany(size=self.FETCHALL_BLOCKSIZE)
result.extend(r)
return result
def close(self):
self.connection = None
def _check_closed(self):
if self.connection is None or self.connection.closed:
raise ProgrammingError("Cursor closed")
|
|
#!/usr/bin/env python
"""Tests client actions related to administrating the client."""
import os
import StringIO
import psutil
from grr.client import comms
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
class ConfigActionTest(test_lib.EmptyActionTest):
"""Tests the client actions UpdateConfiguration and GetConfiguration."""
def testUpdateConfiguration(self):
"""Test that we can update the config."""
# A unique name on the filesystem for the writeback.
self.config_file = os.path.join(self.temp_dir, "ConfigActionTest.yaml")
# In a real client, the writeback location should be set to something real,
    # but for this test we make it the same as the config file.
config_lib.CONFIG.SetWriteBack(self.config_file)
# Make sure the file is gone
self.assertRaises(IOError, open, self.config_file)
location = ["http://www.example1.com/", "http://www.example2.com/"]
request = rdfvalue.Dict()
request["Client.control_urls"] = location
request["Client.foreman_check_frequency"] = 3600
result = self.RunAction("UpdateConfiguration", request)
self.assertEqual(result, [])
self.assertEqual(config_lib.CONFIG["Client.foreman_check_frequency"], 3600)
# Test the config file got written.
data = open(self.config_file).read()
self.assertTrue("control_urls: {0}".format(",".join(location)) in data)
self.urls = []
# Now test that our location was actually updated.
def FakeUrlOpen(req, timeout=10):
_ = timeout
self.urls.append(req.get_full_url())
return StringIO.StringIO()
comms.urllib2.urlopen = FakeUrlOpen
client_context = comms.GRRHTTPClient(worker=MockClientWorker)
client_context.MakeRequest("", comms.Status())
self.assertTrue(location[0] in self.urls[0])
self.assertTrue(location[1] in self.urls[1])
def testUpdateConfigBlacklist(self):
"""Tests that disallowed fields are not getting updated."""
config_lib.CONFIG.Set("Client.control_urls", ["http://something.com/"])
config_lib.CONFIG.Set("Client.server_serial_number", 1)
location = ["http://www.example.com"]
request = rdfvalue.Dict()
request["Client.control_urls"] = location
request["Client.server_serial_number"] = 10
self.RunAction("UpdateConfiguration", request)
# Location can be set.
self.assertEqual(config_lib.CONFIG["Client.control_urls"], location)
# But the server serial number can not be updated.
self.assertEqual(config_lib.CONFIG["Client.server_serial_number"], 1)
def testGetConfig(self):
"""Check GetConfig client action works."""
# Use UpdateConfig to generate a config.
location = ["http://example.com"]
request = rdfvalue.Dict()
request["Client.control_urls"] = location
request["Client.foreman_check_frequency"] = 3600
self.RunAction("UpdateConfiguration", request)
# Check that our GetConfig actually gets the real data.
self.RunAction("GetConfiguration")
self.assertEqual(config_lib.CONFIG["Client.foreman_check_frequency"], 3600)
self.assertEqual(config_lib.CONFIG["Client.control_urls"], location)
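# --- Illustrative sketch (not part of the original test file) ---
# The stubbing pattern used by testUpdateConfiguration above, written as a
# reusable helper: temporarily replace a module's urlopen (e.g.
# module=comms.urllib2) with a fake that records the requested URLs.  The
# helper name is hypothetical; it relies on the StringIO import at the top of
# this file.
def _example_capture_urls(module, make_request):
  """Swap in a fake urlopen, call make_request(), and return the URLs seen."""
  captured = []
  def FakeUrlOpen(req, timeout=10):
    _ = timeout
    captured.append(req.get_full_url())
    return StringIO.StringIO()
  original = module.urlopen
  module.urlopen = FakeUrlOpen
  try:
    make_request()
  finally:
    module.urlopen = original  # unlike the test above, restore the original
  return captured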
class MockStatsCollector(object):
"""Mock stats collector for GetClientStatsActionTest."""
# First value in every tuple is a timestamp (as if it was returned by
# time.time()).
cpu_samples = [(rdfvalue.RDFDatetime().FromSecondsFromEpoch(100),
0.1, 0.1, 10.0),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(110),
0.1, 0.2, 15.0),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(120),
0.1, 0.3, 20.0)]
io_samples = [(rdfvalue.RDFDatetime().FromSecondsFromEpoch(100), 100, 100),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(110), 200, 200),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(120), 300, 300)]
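# Field layout inferred from the assertions in GetClientStatsActionTest below:
# cpu_samples entries are (timestamp, user_cpu_time, system_cpu_time,
# cpu_percent) and io_samples entries are (timestamp, read_bytes, write_bytes).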
class MockClientWorker(object):
"""Mock client worker for GetClientStatsActionTest."""
def __init__(self):
self.stats_collector = MockStatsCollector()
class GetClientStatsActionTest(test_lib.EmptyActionTest):
"""Test GetClientStats client action."""
def setUp(self):
super(GetClientStatsActionTest, self).setUp()
self.old_boot_time = psutil.boot_time
psutil.boot_time = lambda: 100
def tearDown(self):
super(GetClientStatsActionTest, self).tearDown()
psutil.boot_time = self.old_boot_time
def testReturnsAllDataByDefault(self):
"""Checks that stats collection works."""
stats.STATS.RegisterCounterMetric("grr_client_received_bytes")
stats.STATS.IncrementCounter("grr_client_received_bytes", 1566)
stats.STATS.RegisterCounterMetric("grr_client_sent_bytes")
stats.STATS.IncrementCounter("grr_client_sent_bytes", 2000)
results = self.RunAction("GetClientStats", grr_worker=MockClientWorker(),
arg=rdfvalue.GetClientStatsRequest())
response = results[0]
self.assertEqual(response.bytes_received, 1566)
self.assertEqual(response.bytes_sent, 2000)
self.assertEqual(len(response.cpu_samples), 3)
for i in range(3):
self.assertEqual(response.cpu_samples[i].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(
100 + i * 10))
self.assertAlmostEqual(response.cpu_samples[i].user_cpu_time, 0.1)
self.assertAlmostEqual(response.cpu_samples[i].system_cpu_time,
0.1 * (i + 1))
self.assertAlmostEqual(response.cpu_samples[i].cpu_percent, 10.0 + 5 * i)
self.assertEqual(len(response.io_samples), 3)
for i in range(3):
self.assertEqual(response.io_samples[i].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(
100 + i * 10))
self.assertEqual(response.io_samples[i].read_bytes, 100 * (i + 1))
self.assertEqual(response.io_samples[i].write_bytes, 100 * (i + 1))
self.assertEqual(response.boot_time, long(100 * 1e6))
def testFiltersDataPointsByStartTime(self):
start_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(117)
results = self.RunAction(
"GetClientStats", grr_worker=MockClientWorker(),
arg=rdfvalue.GetClientStatsRequest(start_time=start_time))
response = results[0]
self.assertEqual(len(response.cpu_samples), 1)
self.assertEqual(response.cpu_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(120))
self.assertEqual(len(response.io_samples), 1)
self.assertEqual(response.io_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(120))
def testFiltersDataPointsByEndTime(self):
end_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(102)
results = self.RunAction(
"GetClientStats", grr_worker=MockClientWorker(),
arg=rdfvalue.GetClientStatsRequest(end_time=end_time))
response = results[0]
self.assertEqual(len(response.cpu_samples), 1)
self.assertEqual(response.cpu_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(100))
self.assertEqual(len(response.io_samples), 1)
self.assertEqual(response.io_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(100))
def testFiltersDataPointsByStartAndEndTimes(self):
start_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(109)
end_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(113)
results = self.RunAction(
"GetClientStats", grr_worker=MockClientWorker(),
arg=rdfvalue.GetClientStatsRequest(start_time=start_time,
end_time=end_time))
response = results[0]
self.assertEqual(len(response.cpu_samples), 1)
self.assertEqual(response.cpu_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(110))
self.assertEqual(len(response.io_samples), 1)
self.assertEqual(response.io_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(110))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# from functools import reduce
class ConditionalAccumulatorTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { unknown_rank: true} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
def testConstructorWithInvalidArg(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", reduction_type="Invalid")
def testConstructorWithShape(self):
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { dim {size: 1 }
dim {size: 5 }
dim {size: 2 }
dim {size: 8 }
} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
@test_util.run_deprecated_v1
def testAccumulatorSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
@test_util.run_deprecated_v1
def testAccumulatorSetGlobalStep(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
@test_util.run_deprecated_v1
def testAccumulatorApplyGradFloat32(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
accum_op.run()
@test_util.run_deprecated_v1
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
q = data_flow_ops.ConditionalAccumulator(
dtype, shape=tensor_shape.TensorShape([1]))
elems = np.arange(10).astype(dtype.as_numpy_dtype)
for e in elems:
q.apply_grad((e,)).run()
result = self.evaluate(q.take_grad(1))
self.assertEqual(sum(elems) / len(elems), result)
@test_util.run_deprecated_v1
def testAccumulatorMultipleAccumulators(self):
with self.cached_session():
q_f32_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f32_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
for i in range(len(accums)):
accums[i].apply_grad((i + 10.0,)).run()
for i in range(len(accums)):
result = accums[i].take_grad(1).eval()
self.assertEqual(result, i + 10.0)
@test_util.run_deprecated_v1
def testAccumulatorApplyAndTakeGradWithShape(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
for x, y in zip(elems[0], elems[1])]
accum_ops = [q.apply_grad(x) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
is_all_equal = True
val = self.evaluate(takeg_t)
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
@test_util.run_deprecated_v1
def testAccumulatorApplyGradWithWrongShape(self):
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))
with self.assertRaises(ValueError):
q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
with self.assertRaises(ValueError):
q.apply_grad([[1.0], [2.0], [3.0]])
@test_util.run_deprecated_v1
def testAccumulatorDynamicShape(self):
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)
x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(c, d)]
for c, d in zip(elems[0], elems[1])]
takeg_t = q.take_grad(1)
for elem in elems:
sess.run(accum_op, feed_dict={x: elem})
is_all_equal = True
val = self.evaluate(takeg_t)
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
@test_util.run_v1_only("b/120545219")
def testAccumulatorWrongDynamicShape(self):
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)
x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
# First successful apply_grad determines shape
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})
@test_util.run_deprecated_v1
def testAccumulatorSizeAfterApplyGrad(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
@test_util.run_deprecated_v1
def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
extract_t = q.take_grad(2)
# Applying gradient multiple times to increase size from 0 to 2.
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
# Extract will reduce size to 0
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
# A successful take_grad always resets the accumulator size to 0.
accum_op = q.apply_grad((10.0,), local_step=1)
accum_op.run()
accum_op.run()
accum_op.run()
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 4)
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradMean(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(15.0, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(constant_op.constant(1))
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(15.0, val)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradSum(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1]),
reduction_type="SUM")
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(30.0, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(constant_op.constant(1))
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(30.0, val)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradInvalidReductionType(self):
with self.assertRaises(ValueError):
data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1]),
reduction_type="Invalid")
@test_util.run_v1_only("b/120545219")
def testAccumulatorInvalidTakeGrad(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,)) for x in elems]
takeg_t = q.take_grad(-1)
for accum_op in accum_ops:
accum_op.run()
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(takeg_t)
@test_util.run_deprecated_v1
def testAccumulatorRepeatedTakeGradMean(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_ave, val)
elems = [20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_ave + 0.0, val)
@test_util.run_deprecated_v1
def testAccumulatorRepeatedTakeGradSum(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1]),
reduction_type="SUM")
elems = [10.0, 20.0]
elems_sum = 30.0
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_sum, val)
elems = [20.0, 30.0]
elems_sum = 50.0
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = self.evaluate(takeg_t)
self.assertEqual(elems_sum, val)
@test_util.run_deprecated_v1
def testAccumulatorIncrementGlobalStep(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
global_step = variables.Variable(0, name="global_step")
new_global_step = math_ops.add(global_step, 1)
inc_global_step = state_ops.assign(global_step, new_global_step)
set_global_step_op = q.set_global_step(new_global_step)
self.evaluate(variables.global_variables_initializer())
for _ in range(3):
set_global_step_op.run()
self.evaluate(inc_global_step)
@test_util.run_deprecated_v1
def testAccumulatorSetGlobalStepPreventsAccumulation(self):
with self.cached_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
local_steps = range(1000, 1005)
accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
for ls in local_steps:
set_global_step_op = q.set_global_step(ls)
set_global_step_op.run()
for accum_op in accum_ops:
accum_op.run()
takeg_t = q.take_grad(1)
val = self.evaluate(takeg_t)
self.assertEqual(0.0 + sum(x for x in local_steps
if x >= ls) / sum(1 for x in local_steps
if x >= ls), val)
@test_util.run_v1_only("b/120545219")
def testParallelApplyGrad(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
def apply_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(
target=apply_grad, args=(o,)) for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
self.assertEqual(val, sum(elems) / len(elems))
@test_util.run_v1_only("b/120545219")
def testParallelTakeGrad(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [e for e in range(10)]
accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
takeg_t = q.take_grad(1)
def apply_grad():
for accum_op in accum_ops:
time.sleep(1.0)
self.evaluate(accum_op)
apply_grad_thread = self.checkedThread(target=apply_grad)
results = []
def take_grad():
results.append(self.evaluate(takeg_t))
threads = [self.checkedThread(target=take_grad) for _ in range(10)]
for thread in threads:
thread.start()
apply_grad_thread.start()
for thread in threads:
thread.join()
apply_grad_thread.join()
self.assertItemsEqual(elems, results)
@test_util.run_v1_only("b/120545219")
def testAccumulatorApplyAndBlockingTake(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(3)
def apply_grad():
time.sleep(1.0)
for accum_op in accum_ops:
self.evaluate(accum_op)
return_array = []
def take_grad():
return_array.append(self.evaluate(takeg_t))
accum_thread = self.checkedThread(target=apply_grad)
takeg_thread = self.checkedThread(target=take_grad)
accum_thread.start()
takeg_thread.start()
accum_thread.join()
takeg_thread.join()
self.assertEqual([elems_ave], return_array)
def _blocking_takeg(self, sess, takeg_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(takeg_op)
@test_util.run_v1_only("b/120545219")
def testAccumulatorCancel(self):
with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
takeg_t = q.take_grad(1)
takeg_thread = self.checkedThread(
self._blocking_takeg, args=(sess, takeg_t))
takeg_thread.start()
time.sleep(1.0)
sess.close() # Will cancel blocked operation
takeg_thread.join()
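# --- Illustrative sketch (not part of the original test file) ---
# A plain-Python model of the reduction semantics exercised above, to make the
# expected numbers explicit: with the default MEAN reduction, applying 10.0
# and 20.0 and then taking the gradient yields 15.0; with SUM it yields 30.0.
# This only mirrors the behaviour the tests assert; it is not TensorFlow's
# implementation.
def _example_reduce_grads(grads, reduction_type="MEAN"):
  total = sum(grads)
  return total if reduction_type == "SUM" else total / len(grads)
assert _example_reduce_grads([10.0, 20.0]) == 15.0
assert _example_reduce_grads([10.0, 20.0], reduction_type="SUM") == 30.0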
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import threading
import time
import numpy as np
from PySide import QtGui, QtCore
from . import utils
from . import input
from . import _ember
import matplotlib
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
if sys.version_info.major == 3:
_stringTypes = (str,)
else:
_stringTypes = (str, unicode)
class SolverThread(threading.Thread):
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
self.solver = kwargs['solver']
self.conf = kwargs['conf'].evaluate()
self.solver.lock = threading.Lock()
self.solver.progress = 0.0
self.__stop = False
self.daemon = True
self.stage = 0
self.frac0 = 0.15
def run(self):
done = 0
i = 0
while not self.__stop and not done:
i += 1
with self.solver.lock:
done = self.solver.step()
time.sleep(1e-3)
if not i % 5:
self.updateProgress()
if done:
self.solver.progress = 1.0
self.__stop = True
def stop(self):
self.__stop = True
def updateProgress(self):
TC = self.conf.terminationCondition
errNow = self.solver.terminationCondition
if TC.measurement is None:
self.solver.progress = self.solver.timeseriesWriter.t[-1] / TC.tEnd
elif self.stage == 0:
# First part: getting past the minimum steady-state measurement period
self.solver.progress = self.frac0 * self.solver.timeseriesWriter.t[-1] / TC.steadyPeriod
if errNow < 1e9:
self.refCond = errNow
self.stage = 1
else:
# Second part: approximately linearize the approach to steady state
A = (np.log10(TC.tolerance + (errNow-TC.tolerance)/self.refCond) /
np.log10(TC.tolerance))
P = min(self.frac0 + (1-self.frac0) * A ** 0.5 , 1.0)
self.solver.progress = max(P, self.solver.progress) # never go backwards
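# --- Illustrative sketch (not part of the original module) ---
# The two-stage progress mapping used by SolverThread.updateProgress above,
# written as a standalone function.  Argument names are illustrative, and the
# stage switch is simplified: the original tracks self.stage explicitly and
# also never lets the reported progress decrease.
def _example_progress(elapsed, steady_period, err_now, err_ref, tol, frac0=0.15):
    from math import log10
    if err_ref is None:
        # Stage 0: waiting out the minimum steady-state measurement period.
        return frac0 * elapsed / steady_period
    # Stage 1: roughly linearize the (logarithmic) approach to steady state.
    A = log10(tol + (err_now - tol) / err_ref) / log10(tol)
    return min(frac0 + (1 - frac0) * A ** 0.5, 1.0)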
class OptionWidget(QtGui.QWidget):
def __init__(self, label, opt, *args, **kwargs):
QtGui.QWidget.__init__(self)
self.opt = opt
self.label = label
self.optName = self.label.text()
self.setLayout(QtGui.QHBoxLayout())
self.layout().setContentsMargins(0,0,0,0)
def updateOpt(self):
if self.opt.value != self.opt.default:
self.label.setText('<i>%s</i>' % self.optName)
else:
self.label.setText('%s' % self.optName)
self.parent().parent().updateVisibility()
class StringOptionWidget(OptionWidget):
def __init__(self, label, opt, *args, **kwargs):
OptionWidget.__init__(self, label, opt)
self.text = QtGui.QLineEdit(opt.value)
self.layout().addWidget(self.text)
self.text.textChanged.connect(self.updateOpt)
def updateOpt(self):
self.opt.value = str(self.text.text())
OptionWidget.updateOpt(self)
class NumericOptionWidget(OptionWidget):
def __init__(self, label, opt, *args, **kwargs):
OptionWidget.__init__(self, label, opt)
self.text = QtGui.QLineEdit(str(opt.value))
self.layout().addWidget(self.text)
self.text.textChanged.connect(self.updateOpt)
def updateOpt(self):
try:
self.opt.value = float(self.text.text())
except ValueError:
pass
OptionWidget.updateOpt(self)
class IntegerOptionWidget(NumericOptionWidget):
def updateOpt(self):
try:
self.opt.value = int(self.text.text())
except ValueError:
pass
OptionWidget.updateOpt(self)
class BoolOptionWidget(OptionWidget):
def __init__(self, label, opt, *args, **kwargs):
OptionWidget.__init__(self, label, opt)
self.trueWidget = QtGui.QRadioButton('True')
self.falseWidget = QtGui.QRadioButton('False')
self.noneWidget = QtGui.QRadioButton('None')
if opt.value:
self.trueWidget.toggle()
else:
self.falseWidget.toggle()
self.layout().addWidget(self.trueWidget)
self.layout().addWidget(self.falseWidget)
self.layout().addWidget(self.noneWidget)
self.noneWidget.hide()
self.trueWidget.released.connect(self.updateOpt)
self.falseWidget.released.connect(self.updateOpt)
self.layout().addStretch(1)
self._savedValue = None
def updateOpt(self):
self.opt.value = self.trueWidget.isChecked()
OptionWidget.updateOpt(self)
def setEnabled(self, tf):
if tf and not self.isEnabled() and self._savedValue is not None:
self.trueWidget.setChecked(self._savedValue)
self.falseWidget.setChecked(not self._savedValue)
self._savedValue = None
elif not tf and self.isEnabled() and self._savedValue is None:
self._savedValue = self.trueWidget.isChecked()
self.noneWidget.setChecked(True)
OptionWidget.setEnabled(self, tf)
class EnumOptionWidget(OptionWidget):
def __init__(self, label, opt, *args, **kwargs):
OptionWidget.__init__(self, label, opt)
self.combo = QtGui.QComboBox()
self.items = {}
for i,choice in enumerate(opt.choices):
if choice == opt.value:
startIndex = i
self.combo.addItem(str(choice))
self.items[i] = choice
self.combo.setCurrentIndex(startIndex)
self.layout().addWidget(self.combo)
self.combo.currentIndexChanged.connect(self.updateOpt)
def updateOpt(self):
self.opt.value = self.items[self.combo.currentIndex()]
OptionWidget.updateOpt(self)
class OptionsWidget(QtGui.QGroupBox):
def __init__(self, opts, *args, **kwargs):
QtGui.QGroupBox.__init__(self)
self.opts = opts
self.setLayout(QtGui.QGridLayout())
self.layout().setSpacing(0)
self.setTitle(self.opts.__class__.__name__)
self.optionWidgets = []
width = 0
for i,(name,opt) in enumerate(self.opts):
if opt.label:
label = QtGui.QLabel(opt.label)
label.setToolTip('<tt>%s</tt>' % name)
else:
label = QtGui.QLabel(name)
self.layout().addWidget(label, i, 0)
width = max(label.sizeHint().width(), width)
if opt.choices is not None:
w = EnumOptionWidget(label, opt)
elif isinstance(opt, input.StringOption):
w = StringOptionWidget(label, opt)
elif isinstance(opt, input.IntegerOption):
w = IntegerOptionWidget(label, opt)
elif isinstance(opt, input.FloatOption):
w = NumericOptionWidget(label, opt)
elif isinstance(opt, input.BoolOption):
w = BoolOptionWidget(label, opt)
else:
w = QtGui.QLabel(str(opt.value))
w.opt = opt
self.layout().addWidget(w, i, 1)
self.optionWidgets.append((label,w))
self.layout().setVerticalSpacing(4)
self.layout().setColumnMinimumWidth(0, width + 5)
spacer = QtGui.QSpacerItem(1, 1000, QtGui.QSizePolicy.Minimum,
QtGui.QSizePolicy.Maximum)
self.layout().addItem(spacer, i+1, 0)
def updateVisibility(self, level, conf):
anyVisible = False
anyEnabled = False
for label,w in self.optionWidgets:
if w.opt.level > level:
w.hide()
label.hide()
else:
anyVisible = True
w.show()
label.show()
e = w.opt.shouldBeEnabled(conf)
w.setEnabled(e)
label.setEnabled(e)
if e:
anyEnabled = True
return anyVisible, anyEnabled
class MultiOptionsWidget(QtGui.QWidget):
""" Widget used for presenting solver configuration options """
def __init__(self, conf, *args, **kwargs):
QtGui.QWidget.__init__(self)
self.conf = conf
self.setLayout(QtGui.QHBoxLayout())
self.optionsList = QtGui.QListWidget()
self.optionsList.setSpacing(1)
self.layout().addWidget(self.optionsList)
self.activeOptionWidget = None
self.optionWidgets = []
self.level = 0
height = 0
for item in self.conf:
listitem = QtGui.QListWidgetItem(item.__class__.__name__)
self.optionsList.addItem(listitem)
w = OptionsWidget(item)
self.optionWidgets.append((listitem,w))
height = max(height, w.minimumSizeHint().height())
listitem.widget = w
self.layout().addWidget(w)
w.hide()
self.optionsList.setCurrentRow(0)
self.setActiveWidget(self.optionsList.currentItem())
self.setMinimumHeight(height)
width = self.optionsList.sizeHintForColumn(0) + 10
self.optionsList.setMinimumWidth(width)
self.optionsList.setMaximumWidth(width)
self.optionsList.currentItemChanged.connect(self.setActiveWidget)
self.optionsList.setSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Preferred)
def setActiveWidget(self, listitem):
if self.activeOptionWidget is not None:
self.activeOptionWidget.hide()
self.activeOptionWidget = listitem.widget
self.activeOptionWidget.show()
def updateVisibility(self, level=None):
if level is not None:
self.level = level
for listitem, w in self.optionWidgets:
visible, enabled = w.updateVisibility(self.level, self.conf)
listitem.setHidden(not visible or not enabled)
class SolverWidget(QtGui.QWidget):
""" Widget used to run and monitor the Ember solver """
def __init__(self, conf, *args, **kwargs):
QtGui.QWidget.__init__(self)
self.conf = conf
self.setLayout(QtGui.QVBoxLayout())
# Buttons
self.startButton = QtGui.QPushButton('Start')
self.stopButton = QtGui.QPushButton('Stop')
self.resetButton = QtGui.QPushButton('Reset')
self.buttons = QtGui.QWidget()
self.buttons.setLayout(QtGui.QHBoxLayout())
self.buttons.layout().addWidget(self.startButton)
self.buttons.layout().addWidget(self.stopButton)
self.buttons.layout().addWidget(self.resetButton)
self.layout().addWidget(self.buttons)
self.startButton.pressed.connect(self.run)
self.stopButton.pressed.connect(self.stop)
self.resetButton.pressed.connect(self.reset)
# Progress Bar
self.progressBar = QtGui.QProgressBar()
self.layout().addWidget(self.progressBar)
self.progressBar.setRange(0, 1000)
self.progressBar.setValue(0)
# Graphs
self.graphContainer = QtGui.QWidget()
self.graphContainer.setLayout(QtGui.QHBoxLayout())
self.layout().addWidget(self.graphContainer)
self.fig = Figure(figsize=(600,400), dpi=72)
self.fig.subplots_adjust(0.09, 0.08, 0.93, 0.96, wspace=0.3)
self.ax1 = self.fig.add_subplot(1,2,1)
self.ax1.set_xlabel('time [ms]')
self.ax1.set_ylabel('Consumption Speed, $S_c$ [cm/s]')
self.Sc_timeseries = self.ax1.plot([0],[0], lw=2)[0]
self.ax2a = self.fig.add_subplot(1,2,2)
self.ax2b = self.ax2a.twinx()
self.ax2a.set_xlabel('flame coordinate [mm]')
self.ax2a.set_ylabel('Temperature [K]')
self.ax2b.set_ylabel('Heat Release Rate [MW/m$^3$]')
self.T_profile = self.ax2a.plot([0],[0], 'b', lw=2)[0]
self.hrr_profile = self.ax2b.plot([0],[0], 'r', lw=2)[0]
self.canvas = FigureCanvas(self.fig)
self.graphContainer.layout().addWidget(self.canvas)
bgcolor = self.palette().color(QtGui.QPalette.Window)
self.fig.set_facecolor((bgcolor.redF(), bgcolor.greenF(), bgcolor.blueF()))
#self.fig.patch.set_alpha(0.1)
# internals
self.solver = None
self.solverThread = None
self.updateTimer = QtCore.QTimer()
self.updateTimer.setInterval(0.5)
self.updateTimer.timeout.connect(self.updateStatus)
self.running = False
self.updateButtons()
def run(self):
if self.solverThread is not None and self.solverThread.is_alive():
return
if self.solver is None:
self.conf.validate()
self.solver = _ember.FlameSolver(self.conf)
self.solver.initialize()
self.solverThread = SolverThread(solver=self.solver,
conf=self.conf)
self.solverThread.start()
self.updateTimer.start()
self.running = True
self.updateButtons()
def stop(self):
if self.solverThread:
self.solverThread.stop()
self.updateTimer.stop()
self.running = False
self.startButton.setText('Resume')
self.updateButtons()
def reset(self):
self.progressBar.setValue(0)
self.T_profile.set_data([0], [0])
self.hrr_profile.set_data([0], [0])
self.Sc_timeseries.set_data([0], [0])
self.canvas.draw()
self.solver = None
self.startButton.setText('Start')
self.updateButtons()
def updateButtons(self):
running = self.running and self.solverThread is not None and self.solverThread.is_alive()
self.startButton.setEnabled(not running)
self.stopButton.setEnabled(running)
self.resetButton.setEnabled(self.solver is not None and not running)
def updateStatus(self):
if not self.solver:
return
if not self.solverThread.is_alive():
self.running = False
self.updateTimer.stop()
self.updateButtons()
if self.solver.progress > 0:
self.progressBar.setValue(1000 * self.solver.progress)
with self.solver.lock:
t = np.array(self.solver.timeseriesWriter.t)
Sc = np.array(self.solver.timeseriesWriter.Sc)
self.T_profile.set_data(self.solver.x * 1000,
self.solver.T)
self.hrr_profile.set_data(self.solver.x * 1000,
self.solver.qDot / 1e6)
self.Sc_timeseries.set_data(1000 * t, Sc * 100)
for ax in (self.ax1, self.ax2a, self.ax2b):
ax.relim()
ax.autoscale_view(False, True, True)
self.canvas.draw()
class MainWindow(QtGui.QMainWindow):
def __init__(self, *args, **kwargs):
QtGui.QMainWindow.__init__(self)
w = QtGui.QWidget()
self.setCentralWidget(w)
self.resize(800,600)
self.setWindowTitle('Simple')
fileMenu = self.menuBar().addMenu('&File')
optMenu = self.menuBar().addMenu('&Options')
self.addToMenu(fileMenu, '&New', lambda: self.new())
self.addToMenu(fileMenu, '&Open...', lambda: self.openConf())
self.addToMenu(fileMenu, '&Save', lambda: self.saveConf(True))
self.addToMenu(fileMenu, 'Save &as...', lambda: self.saveConf(False))
self.addToMenu(fileMenu, '&Quit', self.close)
optLevelGroup = QtGui.QActionGroup(optMenu)
a = self.addToMenu(optMenu, '&Basic',
lambda: self.setLevel(0), optLevelGroup)
self.addToMenu(optMenu, '&Advanced',
lambda: self.setLevel(1), optLevelGroup)
self.addToMenu(optMenu, '&Expert',
lambda: self.setLevel(2), optLevelGroup)
a.setChecked(True)
self.level = 0
self.confFileName = None
if len(args) == 2:
self.new(args[1])
else:
self.new()
def addToMenu(self, menu, name, triggerFunc, group=None):
a = QtGui.QAction(name, self)
a.triggered.connect(triggerFunc)
menu.addAction(a)
if group:
a.setCheckable(True)
group.addAction(a)
return a
def setLevel(self, level):
self.level = level
self.confWidget.updateVisibility(level)
def new(self, conf=None):
if conf is None:
self.conf = input.Config(input.Paths(logFile='gui-runlog.txt'))
elif isinstance(conf, input.Config):
self.conf = conf
elif isinstance(conf, _stringTypes):
if not os.path.exists(conf):
print("Can't find input file '%s'" % conf)
return
localenv = {}
execstatements = ['from numpy import *',
'import numpy as np',
'from ember.input import *']
execstatements.extend(open(conf).readlines())
exec('\n'.join(execstatements), localenv)
self.conf = localenv['conf']
self.confFileName = conf
self.tabWidget = QtGui.QTabWidget()
self.setCentralWidget(self.tabWidget)
self.confWidget = MultiOptionsWidget(self.conf)
self.tabWidget.addTab(self.confWidget, 'Configure')
self.setLevel(self.level)
self.runWidget = SolverWidget(self.conf)
self.tabWidget.addTab(self.runWidget, 'Run')
self.tabWidget.addTab(QtGui.QWidget(), 'Analyze') #TODO: unimplemented
def openConf(self):
fileinfo = QtGui.QFileDialog.getOpenFileName(
self, 'Select Configuration', '.', 'Flame Configurations (*.py *.conf)')
# Dealing with an incompatibility between PySide and PyQt
filename = str(fileinfo[0] if isinstance(fileinfo, tuple) else fileinfo)
if os.path.exists(filename):
self.new(filename)
def saveConf(self, useExisting):
if not useExisting or self.confFileName is None:
fileinfo = QtGui.QFileDialog.getSaveFileName(
self, 'Select Configuration', '.', 'Flame Configurations (*.py *.conf)')
# Dealing with an incompatibility between PySide and PyQt
filename = str(fileinfo[0] if isinstance(fileinfo, tuple) else fileinfo)
if not filename:
return
if not filename.endswith('.py') and not filename.endswith('.conf'):
filename += '.py'
# Confirm before overwriting an existing file
if os.path.exists(filename) and not useExisting:
dlg = QtGui.QMessageBox(self.parent())
dlg.setText("A file named '%s' already exists." % filename)
dlg.setInformativeText("Do you wish to overwrite it?")
dlg.setStandardButtons(dlg.Yes | dlg.No)
dlg.setDefaultButton(dlg.Yes)
ret = dlg.exec_()
if ret == dlg.No:
self.saveConf(False)
return
elif ret != dlg.Yes:
print('unknown return value:', ret)
self.confFileName = filename
else:
filename = self.confFileName
outFile = open(filename, 'w')
outFile.write(self.conf.stringify())
def main():
app = QtGui.QApplication(sys.argv)
app.setStyle("Plastique")
window = MainWindow(*sys.argv)
window.show()
sys.exit(app.exec_())
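# --- Illustrative sketch (not part of the original module) ---
# The configuration-loading pattern used by MainWindow.new above: the input
# file is plain Python that is exec'd with numpy and ember.input pre-imported
# and is expected to bind a variable named `conf`.  The helper name is
# hypothetical.
def _example_load_conf(path):
    namespace = {}
    statements = ['from numpy import *',
                  'import numpy as np',
                  'from ember.input import *']
    statements.extend(open(path).readlines())
    exec('\n'.join(statements), namespace)
    return namespace['conf']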
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
import sys
import unittest
import email.utils
from libcloud.common.google import GoogleAuthType
from libcloud.storage.drivers import google_storage
from libcloud.test.common.test_google import GoogleTestCase
from libcloud.test.file_fixtures import StorageFileFixtures
from libcloud.test.secrets import STORAGE_GOOGLE_STORAGE_PARAMS
from libcloud.test.storage.test_s3 import S3Tests, S3MockHttp
from libcloud.utils.py3 import httplib
CONN_CLS = google_storage.GoogleStorageConnection
STORAGE_CLS = google_storage.GoogleStorageDriver
TODAY = email.utils.formatdate(usegmt=True)
class GoogleStorageMockHttp(S3MockHttp):
fixtures = StorageFileFixtures('google_storage')
def _test2_test_get_object(self, method, url, body, headers):
# test_get_object
# Google uses a different HTTP header prefix for metadata
body = self.fixtures.load('list_containers.xml')
headers = {
'content-type': 'application/zip',
'etag': '"e31208wqsdoj329jd"',
'x-goog-meta-rabbits': 'monkeys',
'content-length': 12345,
'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT'
}
return (
httplib.OK,
body,
headers,
httplib.responses[httplib.OK]
)
class GoogleStorageConnectionTest(GoogleTestCase):
@mock.patch('email.utils.formatdate')
@mock.patch('libcloud.common.google.'
'GoogleBaseConnection.add_default_headers')
def test_add_default_headers(self, mock_base_method, mock_formatdate):
mock_formatdate.return_value = TODAY
starting_headers = {'starting': 'headers'}
changed_headers = {'changed': 'headers'}
project = 'foo-project'
# Should use base add_default_headers
mock_base_method.return_value = dict(changed_headers)
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCE)
conn.get_project = lambda: None
self.assertEqual(
conn.add_default_headers(dict(starting_headers)),
dict(changed_headers)
)
mock_base_method.assert_called_once_with(dict(starting_headers))
mock_base_method.reset_mock()
# Base add_default_headers with project
mock_base_method.return_value = dict(changed_headers)
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCE)
conn.get_project = lambda: project
headers = dict(changed_headers)
headers[CONN_CLS.PROJECT_ID_HEADER] = project
self.assertEqual(
conn.add_default_headers(dict(starting_headers)),
headers
)
mock_base_method.assert_called_once_with(dict(starting_headers))
mock_base_method.reset_mock()
# Should use S3 add_default_headers
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCS_S3)
conn.get_project = lambda: None
headers = dict(starting_headers)
headers['Date'] = TODAY
self.assertEqual(conn.add_default_headers(dict(starting_headers)),
headers)
mock_base_method.assert_not_called()
# S3 add_default_headers with project
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCS_S3)
conn.get_project = lambda: project
headers = dict(starting_headers)
headers['Date'] = TODAY
headers[CONN_CLS.PROJECT_ID_HEADER] = project
self.assertEqual(conn.add_default_headers(dict(starting_headers)),
headers)
mock_base_method.assert_not_called()
@mock.patch('libcloud.common.google.GoogleBaseConnection.encode_data')
def test_encode_data(self, mock_base_method):
old_data = 'old data!'
new_data = 'new data!'
# Should use Base encode_data
mock_base_method.return_value = new_data
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCE)
self.assertEqual(conn.encode_data(old_data), new_data)
mock_base_method.assert_called_once_with(old_data)
mock_base_method.reset_mock()
# Should use S3 encode_data (which does nothing)
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCS_S3)
self.assertEqual(conn.encode_data(old_data), old_data)
mock_base_method.assert_not_called()
@mock.patch('libcloud.storage.drivers.s3.'
'BaseS3Connection.get_auth_signature')
def test_get_s3_auth_signature(self, mock_s3_auth_sig_method):
# Check that the S3 HMAC signature method is used.
# Check that headers are copied and modified before calling the method.
mock_s3_auth_sig_method.return_value = 'mock signature!'
starting_params = {}
starting_headers = {
'Date': TODAY,
'x-goog-foo': 'MAINTAIN UPPERCASE!',
'x-Goog-bar': 'Header should be lowered',
'Other': 'LOWER THIS!'
}
modified_headers = {
'date': TODAY,
'x-goog-foo': 'MAINTAIN UPPERCASE!',
'x-goog-bar': 'Header should be lowered',
'other': 'lower this!'
}
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCS_S3)
conn.method = 'GET'
conn.action = '/path'
result = conn._get_s3_auth_signature(starting_params, starting_headers)
self.assertNotEqual(starting_headers, modified_headers)
self.assertEqual(result, 'mock signature!')
mock_s3_auth_sig_method.assert_called_once_with(
method='GET',
headers=modified_headers,
params=starting_params,
expires=None,
secret_key='bar_key',
path='/path',
vendor_prefix='x-goog'
)
@mock.patch('libcloud.common.google.GoogleBaseConnection.pre_connect_hook')
def test_pre_connect_hook_oauth2(self, mock_base_hook):
# Should use BaseGoogleConnection pre_connect_hook
# Check that the base hook is called.
starting_params = {'starting': 'params'}
changed_params = {'changed': 'params'}
starting_headers = {'starting': 'headers'}
changed_headers = {'changed': 'headers'}
mock_base_hook.return_value = (dict(changed_params),
dict(changed_headers))
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCE)
result = conn.pre_connect_hook(
dict(starting_params),
dict(starting_headers)
)
self.assertEqual(
result,
(dict(changed_params), dict(changed_headers))
)
mock_base_hook.assert_called_once_with(
dict(starting_params),
dict(starting_headers)
)
mock_base_hook.reset_mock()
@mock.patch('libcloud.common.google.GoogleBaseConnection.pre_connect_hook')
def test_pre_connect_hook_hmac(self, mock_base_hook):
# Check that we call for a HMAC signature, passing params and headers
# Check that we properly apply the HMAC signature.
# Check that we don't use the BaseGoogleConnection pre_connect_hook.
starting_params = {'starting': 'params'}
starting_headers = {'starting': 'headers'}
def fake_hmac_method(params, headers):
# snapshot the params and headers passed (they are modified later)
fake_hmac_method.params_passed = copy.deepcopy(params)
fake_hmac_method.headers_passed = copy.deepcopy(headers)
return 'fake signature!'
conn = CONN_CLS('foo_user', 'bar_key', secure=True,
auth_type=GoogleAuthType.GCS_S3)
conn._get_s3_auth_signature = fake_hmac_method
conn.action = 'GET'
conn.method = '/foo'
expected_headers = dict(starting_headers)
expected_headers['Authorization'] = (
'%s %s:%s' % (google_storage.SIGNATURE_IDENTIFIER, 'foo_user',
'fake signature!')
)
result = conn.pre_connect_hook(
dict(starting_params),
dict(starting_headers)
)
self.assertEqual(
result,
(dict(starting_params), expected_headers)
)
mock_base_hook.assert_not_called()
self.assertEqual(
fake_hmac_method.params_passed,
starting_params
)
self.assertEqual(
fake_hmac_method.headers_passed,
starting_headers
)
class GoogleStorageTests(S3Tests, GoogleTestCase):
driver_type = STORAGE_CLS
driver_args = STORAGE_GOOGLE_STORAGE_PARAMS
mock_response_klass = GoogleStorageMockHttp
driver = google_storage.GoogleStorageDriver
def test_billing_not_enabled(self):
# TODO
pass
def test_token(self):
# Not supported on Google Storage
pass
if __name__ == '__main__':
sys.exit(unittest.main())
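# --- Illustrative sketch (not part of the original test file) ---
# One header normalization consistent with the starting_headers /
# modified_headers fixture in test_get_s3_auth_signature above: keys are
# lowercased, and values are lowercased as well unless the header is Date or
# carries the x-goog vendor prefix.  This only mirrors the fixture; it is not
# necessarily the driver's exact rule.
def _example_normalize_headers(headers):
    normalized = {}
    for key, value in headers.items():
        lower_key = key.lower()
        if lower_key == 'date' or lower_key.startswith('x-goog'):
            normalized[lower_key] = value
        else:
            normalized[lower_key] = value.lower()
    return normalized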
|
|
from sympy import Symbol, Rational, Order, C, exp, ln, log, O, var, nan, pi, S
from sympy.utilities.pytest import XFAIL, raises
from sympy.abc import w, x, y, z
def test_caching_bug():
# Needs to be the first test, so that all caches are clean.
# Cache it:
e = O(w)
#and test that this won't raise an exception
f = O(w**(-1/x/log(3)*log(5)), w)
def test_simple_1():
o = Rational(0)
assert Order(2*x) == Order(x)
assert Order(x)*3 == Order(x)
assert -28*Order(x) == Order(x)
assert Order(-23) == Order(1)
assert Order(exp(x)) == Order(1,x)
assert Order(exp(1/x)).expr == exp(1/x)
assert Order(x*exp(1/x)).expr == x*exp(1/x)
assert Order(x**(o/3)).expr == x**(o/3)
assert Order(x**(5*o/3)).expr == x**(5*o/3)
assert Order(x**2 + x + y, x) == \
Order(x**2 + x + y, y) == O(1)
raises(NotImplementedError, 'Order(x, 2 - x)')
def test_simple_2():
assert Order(2*x)*x == Order(x**2)
assert Order(2*x)/x == Order(1,x)
assert Order(2*x)*x*exp(1/x) == Order(x**2*exp(1/x))
assert (Order(2*x)*x*exp(1/x)/ln(x)**3).expr == x**2*exp(1/x)*ln(x)**-3
def test_simple_3():
assert Order(x)+x == Order(x)
assert Order(x)+2 == 2+Order(x)
assert Order(x)+x**2 == Order(x)
assert Order(x)+1/x == 1/x+Order(x)
assert Order(1/x)+1/x**2 == 1/x**2+Order(1/x)
assert Order(x)+exp(1/x) == Order(x)+exp(1/x)
def test_simple_4():
assert Order(x)**2 == Order(x**2)
assert Order(x**3)**-2 == Order(x**-6)
def test_simple_5():
assert Order(x)+Order(x**2) == Order(x)
assert Order(x)+Order(x**-2) == Order(x**-2)
assert Order(x)+Order(1/x) == Order(1/x)
def test_simple_6():
assert Order(x)-Order(x) == Order(x)
assert Order(x)+Order(1) == Order(1)
assert Order(x)+Order(x**2) == Order(x)
assert Order(1/x)+Order(1) == Order(1/x)
assert Order(x)+Order(exp(1/x)) == Order(exp(1/x))
assert Order(x**3)+Order(exp(2/x)) == Order(exp(2/x))
assert Order(x**-3)+Order(exp(2/x)) == Order(exp(2/x))
def test_simple_7():
assert 1+O(1) == O(1)
assert 2+O(1) == O(1)
assert x+O(1) == O(1)
assert 1/x+O(1) == 1/x+O(1)
def test_contains_0():
assert Order(1,x).contains(Order(1,x))
assert Order(1,x).contains(Order(1))
assert Order(1).contains(Order(1,x))
def test_contains_1():
assert Order(x).contains(Order(x))
assert Order(x).contains(Order(x**2))
assert not Order(x**2).contains(Order(x))
assert not Order(x).contains(Order(1/x))
assert not Order(1/x).contains(Order(exp(1/x)))
assert not Order(x).contains(Order(exp(1/x)))
assert Order(1/x).contains(Order(x))
assert Order(exp(1/x)).contains(Order(x))
assert Order(exp(1/x)).contains(Order(1/x))
assert Order(exp(1/x)).contains(Order(exp(1/x)))
assert Order(exp(2/x)).contains(Order(exp(1/x)))
assert not Order(exp(1/x)).contains(Order(exp(2/x)))
def test_contains_2():
assert Order(x).contains(Order(y)) is None
assert Order(x).contains(Order(y*x))
assert Order(y*x).contains(Order(x))
assert Order(y).contains(Order(x*y))
assert Order(x).contains(Order(y**2*x))
def test_contains_3():
assert Order(x*y**2).contains(Order(x**2*y)) is None
assert Order(x**2*y).contains(Order(x*y**2)) is None
def test_add_1():
assert Order(x+x) == Order(x)
assert Order(3*x-2*x**2) == Order(x)
assert Order(1+x) == Order(1,x)
assert Order(1+1/x) == Order(1/x)
assert Order(ln(x)+1/ln(x)) == Order(ln(x))
assert Order(exp(1/x)+x) == Order(exp(1/x))
assert Order(exp(1/x)+1/x**20) == Order(exp(1/x))
def test_ln_args():
assert O(log(x)) + O(log(2*x)) == O(log(x))
assert O(log(x)) + O(log(x**3)) == O(log(x))
assert O(log(x*y)) + O(log(x)+log(y)) == O(log(x*y))
def test_multivar_0():
assert Order(x*y).expr == x*y
assert Order(x*y**2).expr == x*y**2
assert Order(x*y,x).expr == x
assert Order(x*y**2,y).expr == y**2
assert Order(x*y*z).expr == x*y*z
assert Order(x/y).expr == x/y
assert Order(x*exp(1/y)).expr == x*exp(1/y)
assert Order(exp(x)*exp(1/y)).expr == exp(1/y)
def test_multivar_0a():
assert Order(exp(1/x)*exp(1/y)).expr == exp(1/x + 1/y)
def test_multivar_1():
assert Order(x+y).expr == x+y
assert Order(x+2*y).expr == x+y
assert (Order(x+y)+x).expr == (x+y)
assert (Order(x+y)+x**2) == Order(x+y)
assert (Order(x+y)+1/x) == 1/x+Order(x+y)
assert Order(x**2+y*x).expr == x**2+y*x
def test_multivar_2():
assert Order(x**2*y+y**2*x,x,y).expr == x**2*y+y**2*x
def test_multivar_mul_1():
assert Order(x+y)*x == Order(x**2+y*x,x,y)
def test_multivar_3():
assert (Order(x)+Order(y)).args in [
(Order(x), Order(y)),
(Order(y), Order(x))]
assert Order(x)+Order(y)+Order(x+y) == Order(x+y)
assert (Order(x**2*y)+Order(y**2*x)).args in [
(Order(x*y**2), Order(y*x**2)),
(Order(y*x**2), Order(x*y**2))]
assert (Order(x**2*y)+Order(y*x)) == Order(x*y)
def test_issue369():
x = Symbol('x')
y = Symbol('y', negative=True)
z = Symbol('z', complex=True)
# check that Order does not modify assumptions about symbols
Order(x)
Order(y)
Order(z)
assert x.is_positive == None
assert y.is_positive == False
assert z.is_positive == None
assert x.is_infinitesimal == None
assert y.is_infinitesimal == None
assert z.is_infinitesimal == None
def test_leading_order():
assert (x+1+1/x**5).extract_leading_order(x) == ((1/x**5, O(1/x**5)),)
assert (1+1/x).extract_leading_order(x) == ((1/x, O(1/x)),)
assert (1+x).extract_leading_order(x) == ((1, O(1, x)),)
assert (1+x**2).extract_leading_order(x) == ((1, O(1, x)),)
assert (2+x**2).extract_leading_order(x) == ((2, O(1, x)),)
assert (x+x**2).extract_leading_order(x) == ((x, O(x)),)
def test_leading_order2():
assert set((2+pi+x**2).extract_leading_order(x)) == set(((pi, O(1, x)),
(S(2), O(1, x))))
assert set((2*x+pi*x+x**2).extract_leading_order(x)) == set(((2*x, O(x)),
(x*pi, O(x))))
def test_order_leadterm():
assert O(x**2)._eval_as_leading_term(x) == O(x**2)
def test_nan():
assert not O(x).contains(nan)
def test_O1():
assert O(1) == O(1, x)
assert O(1) == O(1, y)
assert hash(O(1)) == hash(O(1, x))
assert hash(O(1)) == hash(O(1, y))
def test_getn():
# other lines are tested incidentally by the suite
assert O(x).getn() == 1
assert O(x/log(x)).getn() == 1
assert O(x**2/log(x)**2).getn() == 2
assert O(x*log(x)).getn() == 1
raises(NotImplementedError, '(O(x) + O(y)).getn()')
def test_diff():
assert O(x**2).diff(x) == O(x)
def test_getO():
assert (x).getO() is None
assert (x).removeO() == x
assert (O(x)).getO() == O(x)
assert (O(x)).removeO() == 0
assert (z + O(x) + O(y)).getO() == O(x) + O(y)
assert (z + O(x) + O(y)).removeO() == z
raises(NotImplementedError, '(O(x)+O(y)).getn()')
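# --- Illustrative sketch (not part of the original test file) ---
# A compact illustration of the absorption rules exercised above, using the
# names imported at the top of this file: an Order term swallows constant
# factors and anything of equal or higher order in the same limit.
def _example_order_absorption():
    assert Order(2*x) == Order(x)        # constant factors are dropped
    assert Order(x) + x**2 == Order(x)   # higher-order terms are absorbed
    assert Order(x)*x == Order(x**2)     # multiplication shifts the order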
|
|
from firewallconfigdecryptor.exception import ParserException
from firewallconfigdecryptor.device import Firewall, Gateway, FirewallInterface
from firewallconfigdecryptor.security import ACL, InterfaceACL, SecurityZone, SecurityConduit, ConduitFirewallArchitecture, RuleInteraction,AtomicACE
from firewallconfigdecryptor.enums import RuleOperation, RuleInteractionType, GraphAttribute, SecurityElement, ServiceProtocol, RuleEffect
from firewallconfigdecryptor.utilities import Util, Singleton
import matplotlib.pyplot as plt
import networkx as nx
import firewallconfigdecryptor.log
import firewallconfigdecryptor.properties
import re
import os
import ipaddr
import shutil
@Singleton
class CiscoConfigParser(object):
def __init__(self):
self.config_files = dict()
self.file_contents = dict()
self.delimited = dict()
self.firewalls=dict()
self.acl_details = dict()
self.acls_used = dict()
self.acl_anomalies = dict()
def ExtractFirewallInterfaces(self, file_contents, acls_used):
'''
Example format:
interface Ethernet0/0
nameif sample
security-level 20
ip address 10.0.0.8 255.255.255.0
ospf cost 15
!
'''
# Search for occurrences of 'interface' at beginning of line
extracted_interfaces = dict()
int_start = -1
int_end = -1
int_section_start = -1
int_section_end = -1
count = 0
for line in file_contents:
p = re.search('^interface',line)
q = re.search('^!',line)
if p:
if(int_start == -1):
int_start = count
int_section_start = count
elif int_section_start>0 and q:
int_section_end = count
int_section_start =0
count = count+1
int_end = int_section_end
# Check interface definitions present
if not (int_start >=0 and int_end > int_start): return None
# Extract interfaces
int_definitions = []
int_definition = None
for num in range(int_start,int_end+1):
config_line = file_contents[num]
if re.search('^interface',config_line):
if int_definition != None: int_definitions.append(int_definition)
int_definition = config_line
else :
int_definition = int_definition + "~" + config_line
# Append last
if int_definition != None: int_definitions.append(int_definition)
for int_definition in int_definitions:
interface = self.ExtractInterface(int_definition, acls_used)
if interface !=None:
extracted_interfaces[interface.name.replace(' ','')] = interface
return extracted_interfaces
def ExtractACLsAssigned(self, file_contents):
'''
Example format:
access-group ACL_name in interface int_name
'''
acls_used = dict()
applied_interfaces =[]
acl_name= None
acl_dir = None
int_name = None
lookup_table=dict()
# TODO: verify this parsing loop; it iterates over the flat file_contents
# list rather than per host (file_contents[host]).
for line in file_contents:
#for line in file_contents[host]:
p = re.search('^access-group', line)
if p:
remain = line[p.end():].lstrip()
acl_name = remain.split(' ')[0]
acl_dir = remain.split(' ')[1]
q = re.search('interface',remain)
if q:
rest = remain[q.end():].lstrip()
int_name = rest.split(' ')[0]
if not acls_used.has_key(acl_name):
applied_interfaces= []
else:
applied_interfaces = acls_used[acl_name]
if (not lookup_table.has_key((int_name))) or (not lookup_table[int_name].__contains__(acl_dir)):
applied_interfaces.append(InterfaceACL(int_name, acl_dir, acl_name))
acls_used[acl_name] = applied_interfaces
if not lookup_table.has_key(int_name):
lookup_table[int_name]= []
lookup_table[int_name].append(acl_dir)
if len(acls_used) == 0 : return None
else: return acls_used
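# Worked example (illustrative values): the line
#   access-group OUTSIDE_IN in interface outside
# is parsed above into acl_name='OUTSIDE_IN', acl_dir='in' and
# int_name='outside', so acls_used['OUTSIDE_IN'] ends up containing
# InterfaceACL('outside', 'in', 'OUTSIDE_IN').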
def ExtractInterface(self,interface_definition, acls_used):
t = re.search('interface',interface_definition)
p = re.search('nameif',interface_definition)
q = re.search('ip address',interface_definition)
r = re.search('security-level',interface_definition)
v = re.search('description',interface_definition)
x = re.search('ip access-group',interface_definition)
type=None
name = None
description = None
ip_address = None
applied_interfaces = None
sec_level=-1
if t:
remain = interface_definition[t.end():]
s = re.search("~",remain)
type = remain[0:s.start()]
if p:
remain = interface_definition[p.end():]
s = re.search("~",remain)
name = remain[0:s.start()]
if q:
remain = interface_definition[q.end():]
s = re.search("~",remain)
ip_address = remain[0:s.start()]
if r:
remain = interface_definition[r.end():]
s = re.search("~",remain)
sec_level = remain[0:s.start()]
if v:
remain = interface_definition[v.end():]
s = re.search("~",remain)
description = remain[0:s.start()]
if x:
remain = interface_definition[x.end():].lstrip()
acl_name = remain.split(' ')[0]
acl_dir = remain.split(' ')[1].replace('~','')
if not acls_used.has_key(acl_name):
applied_interfaces= []
else:
applied_interfaces = acls_used[acl_name]
# Skip interfaces that have no assigned IP address
if ip_address==None or len(ip_address)==0: return None
subnet = ip_address.split(' ')[1].replace('\r','')
mask = ip_address.split(' ')[2].replace('\r','')
address = ipaddr.IPNetwork('%s/%s'%(subnet,mask))
if name== None and description!= None:
name=description.replace(' ','')
if applied_interfaces!=None:
applied_interfaces.append(InterfaceACL(name, acl_dir, acl_name))
acls_used[acl_name] = applied_interfaces
return FirewallInterface(type, name, description, address,sec_level)
def GetACLDetails(self, acls_in_use, file_contents):
'''
Example format:
access-list acl-name <entry>
'''
prev_acl_name = None
new_acl_name= None
entry_list = []
acl=dict()
prev_acl_name_2 = None
new_acl_name_2 =None
entry_list_2 =[]
acl_2=dict()
low_level_ruleset_missing=True
for line in file_contents:
p = re.search('^access-list', line)
r = re.search('^ access-list', line)
if p:
# Output of 'show run' - a high-level rule, potentially containing object groups
remain = line[p.end():].lstrip()
# TODO: this is required for ASA
# if not ('line' in remain): continue
new_acl_name = remain.split(' ')[0].replace(';','')
if prev_acl_name != new_acl_name:
entry_list = []
prev_acl_name = new_acl_name
q= re.search(new_acl_name,line)
entry_list.append(line[q.end():].lstrip().replace(';',''))
acl[new_acl_name] = entry_list
if r:
# Output of 'show access-lists' - low-level rules
remain = line[r.end():].lstrip()
if not ('line' in remain): continue
low_level_ruleset_missing=False
new_acl_name_2 = remain.split(' ')[0].replace(';','')
if prev_acl_name_2 != new_acl_name_2:
entry_list_2 = []
prev_acl_name_2 = new_acl_name_2
q= re.search(new_acl_name_2,line)
entry_list_2.append(line[q.end():].lstrip().replace(';',''))
acl_2[new_acl_name_2] = entry_list_2
# Replace high-level ACL entries with their equivalent low-level rule-sets
final_acl = dict()
for acl_name in acl.keys():
final_entry_list = []
for entry in acl[acl_name]:
p = re.search('line', entry)
if p:
remain = entry[p.end():].lstrip()
line_number = remain.split(' ')[0]
low_level_rule_set= None
if acl_2.has_key(acl_name):
low_level_rule_set = self.GetLowLevelRulesetEquivalent("line %s " % line_number, acl_2[acl_name])
if low_level_rule_set ==None:
final_entry_list.append(entry)
else:
for low_level_rule in low_level_rule_set:
final_entry_list.append(low_level_rule)
else:
final_entry_list.append(entry)
final_acl[acl_name] = final_entry_list
# Check whether low-level rules need to be extracted from object-group based HL rules
if low_level_ruleset_missing:
# Extract object groups
self.GetObjectGroupItems(file_contents)
for acl_name in final_acl.keys():
final_entry_list = []
for entry in final_acl[acl_name]:
groups=[]
p = re.search('object-group ',entry)
if p:
remain=entry[p.end():]
group_name=remain.split(' ')[0]
groups.append(group_name)
q=re.search('object-group ',remain)
if q:
remain=remain[q.end():]
group_name=remain.split(' ')[0]
groups.append(group_name)
if len(groups)>0:
item1 = groups[0].replace(' ','')
item2=None
if len(groups)>1:
item2 = groups[1].replace(' ','')
if not self.group_items_lookup.has_key(item1):continue
                        if item2!=None and not self.group_items_lookup.has_key(item2):continue
low_level_entries=[]
if item1!=None and item2!=None:
for group_item1 in self.group_items_lookup[item1]:
for group_item2 in self.group_items_lookup[item2]:
temp = entry.replace('object-group %s'%item1, group_item1)
temp = temp.replace('object-group %s'%item2, group_item2)
if not low_level_entries.__contains__(temp):
low_level_entries.append(temp)
else:
for group_item1 in self.group_items_lookup[item1]:
temp = entry.replace('object-group %s'%item1, group_item1)
if not low_level_entries.__contains__(temp):
low_level_entries.append(temp)
[final_entry_list.append(low_level_entry) for low_level_entry in low_level_entries]
else:
final_entry_list.append(entry)
final_acl[acl_name] = final_entry_list
# Check all ACLs in use have been defined
for acl_name in acls_in_use.keys():
if not final_acl.has_key(acl_name):
raise ParserException(acl_name,properties.resources['acl_definition_missing'])
#TODO: replace hostnames in ACL entries with their ipaddresses
# Build hostname lookup table
self.hostname_lookup = dict()
for line in file_contents:
p = re.search('^name ',line)
if p:
ipaddress = line.split(' ')[1]
hostname = line.split(' ')[2]
self.hostname_lookup[hostname] = ipaddress
for acl_name in final_acl.keys():
entries = final_acl[acl_name]
resolved_entries = []
for entry in entries:
p = re.search("host", entry)
if p:
# lookup and replace source host
remain = entry[p.end():]
hostname = remain.split(' ')[1]
if self.hostname_lookup.has_key(hostname):
ipaddress = self.hostname_lookup[hostname]
entry = entry.replace(hostname, ipaddress)
                    # check for a dest host as well
q = re.search("host", remain)
if q:
# lookup and replace dest host
remain2 = remain[q.end():]
hostname = remain2.split(' ')[1]
if self.hostname_lookup.has_key(hostname):
ipaddress = self.hostname_lookup[hostname]
entry = entry.replace(hostname, ipaddress)
resolved_entries.append(entry)
final_acl[acl_name] = resolved_entries
# Return details of the ACLs in use
acl_collection = dict()
for acl_name in final_acl.keys():
# Only include ACLs in use
if acls_in_use.has_key(acl_name):
acl_collection[acl_name] = ACL(acl_name,final_acl[acl_name])
return acl_collection
def GetObjectGroupItems(self, file_contents):
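        # Build self.group_items_lookup: object-group name -> list of the
        # address/mask strings taken from its 'network-object' lines.
        # 'description' lines are skipped; any other statement ends the group.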
count=0
self.group_items_lookup=dict()
while count<len(file_contents):
p = re.search('^object-group',file_contents[count])
if p:
name=file_contents[count].split(' ')[2]
if not self.group_items_lookup.has_key(name):
self.group_items_lookup[name]=[]
# Get all group items
count+=1
group_items=[]
while count<len(file_contents):
q = re.search('^object-group', file_contents[count])
if q:
break
                    elif 'description' in file_contents[count]:
                        count+=1
                    elif 'network-object' in file_contents[count]:
elements = file_contents[count].split(' ')
ip_address="%s %s"%(elements[2],elements[3])
group_items.append(ip_address)
self.group_items_lookup[name]=group_items
count+=1
else:
break
else:
count+=1
return self.group_items_lookup
def GetLowLevelRulesetEquivalent(self, line_desc, low_level_acl):
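        # Return every low-level ACL entry matching the given 'line <n> '
        # marker, or None when no entry matches.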
rule_set =[]
for entry in low_level_acl:
p = re.search(line_desc, entry)
if p:
rule_set.append(entry)
if len(rule_set) ==0: return None
else : return rule_set
def ProcessImplicitRules(self, firewalls, file_contents, gen_zones, graphml_file_path):
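        # Collect traffic that the devices allow implicitly, i.e. without an
        # explicit ACL entry: ip traffic between interfaces based on their
        # security levels, management access configured via 'ssh', 'http' and
        # 'telnet' statements, syslog traffic from 'logging' statements, and
        # the transport protocols permitted on con/aux/vty lines. Results are
        # stored as (source_ip, dest_ip) tuples per service in the
        # implicitly_allowed_services_* dictionaries.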
self.implicitly_allowed_services_ip = dict()
self.implicitly_allowed_services_tcp= dict()
self.implicitly_allowed_services_udp= dict()
self.implicitly_allowed_services_icmp= dict()
# Check how same sec-level traffic is enabled
enable_same_security_traffic = False
        # e.g. 'same-security-traffic permit inter-interface'
for host in file_contents:
for line in file_contents[host]:
p = re.search('same-security-traffic permit', line)
if p:
if 'inter-interface' in line:
enable_same_security_traffic =True
break
ip_tuples =[]
# Create generic IP overlay based on security level only (i.e. considering interfaces without ACLs)
for host in firewalls:
firewall = firewalls[host]
for interface1 in firewall.interfaces.values():
# Select an interface without an inbound ACL
if (not interface1.acl.has_key('in')) or interface1.acl['in'] == None:
source_ip_list=[]
dest_ip_list=[]
# This can implicitly initiate traffic to another interface without an outbound ACL
for interface2 in firewall.interfaces.values():
# Select other interface without an outbound ACL
if (interface1 != interface2) and ((not interface2.acl.has_key('out')) or interface2.acl['out'] == None):
# Ok to permit ip traffic from high-security zone to low-security zone
if (int(interface1.security_level) > int(interface2.security_level) or
int(interface1.security_level) == int(interface2.security_level) and enable_same_security_traffic):
high_security_zone = gen_zones[host][interface1.type]
low_security_zone = gen_zones[host][interface2.type]
for ip in high_security_zone.ipaddress_list:
source_ip_list.append(ip)
for ip in low_security_zone.ipaddress_list:
dest_ip_list.append(ip)
for source_ip in source_ip_list:
for dest_ip in dest_ip_list:
if not self.implicitly_allowed_services_ip.has_key("ip"):
ip_tuples.append((source_ip,dest_ip))
self.implicitly_allowed_services_ip["ip"] = ip_tuples
else:
if not self.implicitly_allowed_services_ip["ip"].__contains__((source_ip,dest_ip)):
self.implicitly_allowed_services_ip["ip"].append((source_ip,dest_ip))
'''
# TODO:Broadcasts forwarded
for host in file_contents:
for line in file_contents[host]:
p = re.search('^ip forward-protocol ', line)
if p:
# udp [port] | nd | sdns
filter= line.replace('ip forward-protocol ','').split(' ')
protocol = filter[0]
port=None
if len(filter)>1:
port=filter[1]'''
#...TCP
#.....ssh, http, TODO: Add ftp, icmp later (not used in our case study)
for host in file_contents:
firewall = firewalls[host]
for line in file_contents[host]:
p = re.search('^ssh ', line)
r = re.search('^http ', line)
z = re.search('^telnet ', line)
if p:
q= len(line[p.end():].split(' '))
if q>=3:
source = line[p.end():].split(' ')[0]
# check if hostname
if self.hostname_lookup.has_key(source):
source = self.hostname_lookup[source]
source_ip = Util.ConvertStringToIpaddress("%s %s"%(source, line[p.end():].split(' ')[1]))
dest_int = line[p.end():].split(' ')[2]
dest_ip = None
# convert interface name to IP
if firewall.interfaces.has_key(dest_int):
#we're only interested in individual (interface) ip not entire subnet
dest_ip =ipaddr.IPv4Network("%s/%s"% (firewall.interfaces[dest_int].ip_address.ip, '255.255.255.255'))
if not self.implicitly_allowed_services_tcp.has_key('ssh'):
self.implicitly_allowed_services_tcp['ssh'] = []
self.implicitly_allowed_services_tcp['ssh'].append((source_ip,dest_ip))
elif r:
q= len(line[r.end():].split(' '))
if q>=3:
source = line[r.end():].split(' ')[0]
# check if hostname
if self.hostname_lookup.has_key(source):
source = self.hostname_lookup[source]
source_ip = Util.ConvertStringToIpaddress("%s %s"%(source, line[r.end():].split(' ')[1]))
dest_int = line[r.end():].split(' ')[2]
dest_ip = None
# convert interface name to IP
if firewall.interfaces.has_key(dest_int):
#we're only interested in individual (interface) ip not entire subnet
dest_ip =ipaddr.IPv4Network("%s/%s"% (firewall.interfaces[dest_int].ip_address.ip, '255.255.255.255'))
if not self.implicitly_allowed_services_tcp.has_key('http'):
self.implicitly_allowed_services_tcp['http'] = []
self.implicitly_allowed_services_tcp['http'].append((source_ip,dest_ip))
elif z:
q= len(line[z.end():].split(' '))
if q>=3:
source = line[z.end():].split(' ')[0]
# check if hostname
if self.hostname_lookup.has_key(source):
source = self.hostname_lookup[source]
try:
source_ip = Util.ConvertStringToIpaddress("%s %s"%(source, line[z.end():].split(' ')[1]))
except BaseException,e:
continue
dest_int = line[z.end():].split(' ')[2]
dest_ip = None
# convert interface name to IP
if firewall.interfaces.has_key(dest_int):
#we're only interested in individual (interface) ip not entire subnet
dest_ip =ipaddr.IPv4Network("%s/%s"% (firewall.interfaces[dest_int].ip_address.ip, '255.255.255.255'))
if not self.implicitly_allowed_services_tcp.has_key('telnet'):
self.implicitly_allowed_services_tcp['telnet'] = []
self.implicitly_allowed_services_tcp['telnet'].append((source_ip,dest_ip))
# UDP
#..syslog
source_ip=None
server_ip=None
for host in file_contents:
firewall = firewalls[host]
for line in file_contents[host]:
p = re.search('^logging ', line)
if p:
q= len(line[p.end():].split(' '))
if q>=1:
try:
server_ip = ipaddr.IPv4Network("%s/32"%line[p.end():].split(' ')[0])
except BaseException, e:
if 'source-interface' in line[p.end():].split(' ')[0]:
try:
# get interface ip from firewall object
source_interface_type=line[p.end():].split(' ')[1]
# lookup ipaddress by firewall and interface-type
for interface in firewalls[host].interfaces:
if firewalls[host].interfaces[interface].type.replace(' ','')==source_interface_type:
source_ip=ipaddr.IPv4Network(("%s/32")%(firewalls[host].interfaces[interface].ip_address.ip))
break
except BaseException, e:
pass
pass
if source_ip != None and server_ip!=None:
if not self.implicitly_allowed_services_udp.has_key('syslog'):
self.implicitly_allowed_services_udp['syslog'] = []
if not self.implicitly_allowed_services_udp['syslog'].__contains__((source_ip,server_ip)):
self.implicitly_allowed_services_udp['syslog'].append((source_ip,server_ip))
# Default output protocol
default_output_protocols=['telnet']
# From version 11.1 onwards default has been 'none' (prior was 'all')
default_input_protocols=['none']
for host in file_contents:
count=0
physical_access_method=dict()
remote_access_method=dict()
vty_input_protocols = []
vty_output_protocols = []
while count<len(file_contents[host]):
line=file_contents[host][count]
p = re.search("^line con ", line)
q = re.search("^line aux ",line)
r = re.search("^line vty ",line)
if p:
s= len(line[p.end():].split(' '))
if not physical_access_method.has_key('console0'):
physical_access_method['console0']=dict()
# look for transport output command
count+=1
protocols = None
while count<len(file_contents[host]):
line = file_contents[host][count]
if 'line' in line:
count-=1
break
elif 'transport output ' in line:
protocols = line.split('transport output ')[1].split(' ')
                            if protocols!=None:
                                physical_access_method['console0']['out']=protocols
count+=1
if protocols==None:
# Set defaults
physical_access_method['console0']['out']=default_output_protocols
if q:
t= len(line[q.end():].split(' '))
if not physical_access_method.has_key('aux0'):
physical_access_method['aux0']=dict()
# look for transport output command
count+=1
protocols = None
while count<len(file_contents[host]):
line = file_contents[host][count]
if 'line' in line:
count-=1
break
elif 'transport output ' in line:
protocols = line.split('transport output ')[1].split(' ')
                            if protocols!=None:
                                physical_access_method['aux0']['out']=protocols
count+=1
if protocols==None:
# Set defaults
physical_access_method['aux0']['out']=default_output_protocols
if r:
u= len(line[r.end():].split(' '))
if not remote_access_method.has_key('vty'):
remote_access_method['vty']=dict()
# look for transport commands
count+=1
while count<len(file_contents[host]):
line = file_contents[host][count]
if 'line' in line:
count-=1
break
elif 'transport input ' in line:
input_protocols = line.split('transport input ')[1].split(' ')
if input_protocols!=None:
[vty_input_protocols.append(protocol) for protocol in input_protocols if not vty_input_protocols.__contains__(protocol)]
remote_access_method['vty']['in']=vty_input_protocols
elif 'transport output ' in line:
output_protocols = line.split('transport output ')[1].split(' ')
if output_protocols!=None:
[vty_output_protocols.append(protocol) for protocol in output_protocols if not vty_output_protocols.__contains__(protocol)]
remote_access_method['vty']['out']=vty_output_protocols
count+=1
if len(vty_input_protocols)==0:
# Set defaults
remote_access_method['vty']['in']=default_input_protocols
if len(vty_output_protocols)==0:
# Set defaults
remote_access_method['vty']['out']=default_output_protocols
else:
count+=1
if physical_access_method.has_key('console0') and physical_access_method['console0'].has_key('out'):
for protocol in physical_access_method['console0']['out']:
if protocol=='none': break
if not self.implicitly_allowed_services_tcp.has_key(protocol):
self.implicitly_allowed_services_tcp[protocol] = []
# currently handles ssh, telnet only
#TODO: add later - acercon, lat, mop, nasi, pad, rlogin, udptn, v120: see reference for how to handle
for ipaddress in gen_zones[host]['management_data_interface'].ipaddress_list:
source_ip=ipaddr.IPv4Network('%s/32'%ipaddress.ip)
# dest can be any other zone
for interfaces in gen_zones.values():
for zone in interfaces.values():
dest_ip=None
if not zone.ContainsSubnetOrIpaddress(source_ip):
dest_ip = zone.ipaddress_list[0]
if not self.implicitly_allowed_services_tcp[protocol].__contains__((source_ip,dest_ip)):
self.implicitly_allowed_services_tcp[protocol].append((source_ip,dest_ip))
if physical_access_method.has_key('aux0') and physical_access_method['aux0'].has_key('out'):
for protocol in physical_access_method['aux0']['out']:
if protocol=='none': break
if not self.implicitly_allowed_services_tcp.has_key(protocol):
self.implicitly_allowed_services_tcp[protocol] = []
# currently handles ssh, telnet only
#TODO: add later - acercon, lat, mop, nasi, pad, rlogin, udptn, v120: see reference for how to handle
for ipaddress in gen_zones[host]['management_data_interface'].ipaddress_list:
source_ip=ipaddr.IPv4Network('%s/32'%ipaddress.ip)
# dest can be any other zone
for interfaces in gen_zones.values():
for zone in interfaces.values():
dest_ip=None
if not zone.ContainsSubnetOrIpaddress(source_ip):
dest_ip = zone.ipaddress_list[0]
if not self.implicitly_allowed_services_tcp[protocol].__contains__((source_ip,dest_ip)):
self.implicitly_allowed_services_tcp[protocol].append((source_ip,dest_ip))
if remote_access_method.has_key('vty') and remote_access_method['vty'].has_key('out'):
for protocol in remote_access_method['vty']['out']:
if protocol=='none': break
if not self.implicitly_allowed_services_tcp.has_key(protocol):
self.implicitly_allowed_services_tcp[protocol] = []
# currently handles ssh, telnet only
#TODO: add later - acercon, lat, mop, nasi, pad, rlogin, udptn, v120: see reference for how to handle
for ipaddress in gen_zones[host]['management_data_interface'].ipaddress_list:
source_ip=ipaddr.IPv4Network('%s/32'%ipaddress.ip)
# dest can be any other zone
# TODO access-class may be used with an ACL to restrict dest hosts
for interfaces in gen_zones.values():
for zone in interfaces.values():
dest_ip=None
if not zone.ContainsSubnetOrIpaddress(source_ip):
dest_ip = zone.ipaddress_list[0]
if not self.implicitly_allowed_services_tcp[protocol].__contains__((source_ip,dest_ip)):
self.implicitly_allowed_services_tcp[protocol].append((source_ip,dest_ip))
for protocol in remote_access_method['vty']['in']:
if protocol=='none': break
if not self.implicitly_allowed_services_tcp.has_key(protocol):
self.implicitly_allowed_services_tcp[protocol] = []
# currently handles ssh, telnet only
#TODO: add later - acercon, lat, mop, nasi, pad, rlogin, udptn, v120: see reference for how to handle
for ipaddress in gen_zones[host]['management_data_interface'].ipaddress_list:
dest_ip=ipaddr.IPv4Network('%s/32'%ipaddress.ip)
# source can be any other zone
#TODO access-class may be used with an ACL to restrict source hosts
for interfaces in gen_zones.values():
for zone in interfaces.values():
source_ip=None
if not zone.ContainsSubnetOrIpaddress(dest_ip):
source_ip = zone.ipaddress_list[0]
if not self.implicitly_allowed_services_tcp[protocol].__contains__((source_ip,dest_ip)):
self.implicitly_allowed_services_tcp[protocol].append((source_ip,dest_ip))
def ProcessStaticRoutes(self,firewalls, all_zones, file_contents):
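        # Extract gateways from 'route' / 'ip route' statements, record
        # redundant routes in self.potential_route_errors, attach each gateway
        # to the zone that contains its address and, for gateways we have no
        # configuration for, model them as additional Firewall objects with an
        # unknown zone behind them. Gateways that do not fall inside any known
        # zone end up in self.unallocated_gateways.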
interface_gateways = dict()
self.potential_route_errors = []
self.unallocated_gateways =[]
# Extract any gateways from static routes
for host in file_contents:
for line in file_contents[host]:
# TODO: check ^route (space) still works with asa
p = re.search('^route ',line)
q = re.search('^ip route ', line)
if p:
interface_name= line.split(' ')[1]
network= ipaddr.IPv4Network("%s/%s" % (line.split(' ')[2],line.split(' ')[3]))
gateway_ip=ipaddr.IPv4Address(line.split(' ')[4])
# Pragmatic choice of network directly connected to gateway (we don't have real gateway configs to verify)
gateway = Gateway(gateway_ip, [network])
if not interface_gateways.has_key(gateway.ipaddress):
interface_gateways[gateway.ipaddress] = gateway
else:
# Multiple routes for same gateway
#..check non-redundant route
is_redundant=False
for existing_network in interface_gateways[gateway.ipaddress].network_addresses:
if (existing_network == ipaddr.IPv4Network('0.0.0.0/0.0.0.0') or
existing_network.__contains__(network)):
                                if (not self.potential_route_errors.__contains__(line)):
                                    self.potential_route_errors.append(line)
is_redundant= True
break
if not is_redundant:
interface_gateways[gateway.ipaddress].network_addresses.append(network) #add
if q:
line=line.replace('ip route','')
network= ipaddr.IPv4Network("%s/%s" % (line.split(' ')[1],line.split(' ')[2]))
gateway_ip=ipaddr.IPv4Address(line.split(' ')[3])
                    # Pragmatic choice of network directly connected to gateway (we don't have real gateway configs to verify)
gateway = Gateway(gateway_ip, [network])
if not interface_gateways.has_key(gateway.ipaddress):
interface_gateways[gateway.ipaddress] = gateway
else:
# Multiple routes for same gateway
#..check non-redundant route
is_redundant=False
for existing_network in interface_gateways[gateway.ipaddress].network_addresses:
if (existing_network == ipaddr.IPv4Network('0.0.0.0/0.0.0.0') or
existing_network.__contains__(network)):
                                if (not self.potential_route_errors.__contains__(line)):
                                    self.potential_route_errors.append(line)
is_redundant= True
break
if not is_redundant:
interface_gateways[gateway.ipaddress].network_addresses.append(network) #append
fw_zones=[]
# Find the firewall zones
for interfaces in all_zones.values():
if interfaces.has_key('management_data_interface'):
fw_zones.append(interfaces['management_data_interface'])
log.info("Linking Gateways to Zones..")
# Link each gateway found to appropriate zone
count=1
for gateway in interface_gateways.values():
existing_gateway=False
for fw_zone in fw_zones:
if fw_zone.ipaddress_list.__contains__(gateway.ipaddress):
# Gateway is an existing firewall/router..no need to create new
existing_gateway=True
break
if existing_gateway: continue
gateway_allocated=False
for interfaces in all_zones.values():
if gateway_allocated: break
for zone in interfaces.values():
if gateway_allocated: break
if zone.ContainsSubnetOrIpaddress(gateway.ipaddress):
# gateway can potentially have ACLs and behave as a firewall
#..so until we know more about it, treat it as a firewall and keep separate
zone.AddGateway(gateway)
gateway_allocated=True
gateway_name="gw %s"%gateway.ipaddress
if not all_zones.has_key(gateway_name):
all_zones[gateway_name]=dict()
# Gateway connected to respective zone via E0/0
all_zones[gateway_name]["Ethernet0/0"]= zone
# Firewall-Zone connected to gateway via mdi
all_zones[gateway_name]["management_data_interface"]=SecurityZone("fwz(%s)"%gateway_name,[ipaddr.IPv4Network("%s/%s"%(gateway.ipaddress,32))],gateway_name)
# Networks (i.e. Unknown-Zones) connected to gateway via E0/1
unknown_zone_id="UZ%s"%count
all_zones[gateway_name]["Ethernet0/1"]=SecurityZone(unknown_zone_id,gateway.network_addresses,gateway_name)
count+=1
# Update firewalls list
if not firewalls.has_key(gateway_name):
firewalls[gateway_name]= Firewall(gateway_name)
firewalls[gateway_name].interfaces["Ethernet0/0"]=FirewallInterface("Ethernet0/0","Ethernet0/0","gw_%s"%zone.zone_id,zone.ipaddress_list)
firewalls[gateway_name].interfaces["Ethernet0/1"]=FirewallInterface("Ethernet0/1","Ethernet0/1","gw_%s"%unknown_zone_id,gateway.network_addresses)
firewalls[gateway_name].interfaces["management_data_interface"]=FirewallInterface("management_data_interface","management_data_interface","management_data_interface",ipaddr.IPv4Network("%s/%s"%(gateway.ipaddress,32)))
replace_ip=None
excluded=None
for ip in zone.ipaddress_list:
if ip.__contains__(gateway.ipaddress):
excluded=ip.address_exclude(ipaddr.IPv4Network("%s/32"%gateway.ipaddress))
replace_ip=ip
break
if replace_ip!=None: zone.ipaddress_list.remove(replace_ip)
for ip in excluded:
zone.ipaddress_list.append(ip)
if (not gateway_allocated) and (not self.unallocated_gateways.__contains__(gateway.ipaddress)):
self.unallocated_gateways.append(gateway.ipaddress)
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
def _build_testing_df():
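    # Synthetic 100-row frame with numeric x/y/z columns plus categorical
    # columns ('cat', 'cat2', 'c') used to exercise colour/shape aesthetics.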
df = pd.DataFrame({
"x": np.arange(0, 100),
"y": np.arange(0, 100),
"z": np.arange(0, 100)
})
df['cat'] = np.where(df.x*2 > 50, 'blah', 'blue')
df['cat'] = np.where(df.y > 50, 'hello', df.cat)
df['cat2'] = np.where(df.y < 15, 'one', 'two')
df['y'] = np.sin(df.y)
df['z'] = df['y'] + 100
df['c'] = np.where(df.x%2==0,"red", "blue")
return df
def _build_meat_df():
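    # Note: mutates the module-level 'meat' example dataset in place by
    # converting its 'date' column to datetime before returning it.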
meat['date'] = pd.to_datetime(meat.date)
return meat
@cleanup
def test_geom_density():
df = _build_testing_df()
gg = ggplot(aes(x="x", color="c"), data=df)
gg = gg + geom_density() + xlab("x label") + ylab("y label")
assert_same_ggplot(gg, "geom_density")
@cleanup
def test_geom_histogram():
df = _build_testing_df()
# TODO: use fill aesthetic for a better test
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_histogram(), "geom_hist")
assert_same_ggplot(gg + geom_histogram() + ggtitle("My Histogram"), "geom_hist_title")
@cleanup
def test_geom_point():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_point(), "geom_point")
gg = gg + geom_point() + geom_vline(xintercept=50, ymin=-1.5, ymax=1.5)
assert_same_ggplot(gg, "geom_point_vline")
@cleanup
def test_geom_area():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z', color="cat2"), data=df)
assert_same_ggplot(gg + geom_area(), "geom_area")
@cleanup
def test_geom_text():
gg = ggplot(aes(x='wt',y='mpg',label='name'),data=mtcars) + geom_text()
assert_same_ggplot(gg, "geom_text")
@cleanup
def test_geom_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line(), "factor_geom_line")
@cleanup
def test_geom_rect():
df = pd.DataFrame({
'xmin':[3, 5, 3, 3, 9, 4, 8, 3, 9, 2, 9, 1, 11, 4, 7, 1],
'xmax':[10, 8, 10, 4, 10, 5, 9, 4, 10, 4, 11, 2, 12, 6, 9, 12],
'ymin':[3, 3, 6, 2, 2, 6, 6, 8, 8, 4, 4, 2, 2, 1, 1, 4],
'ymax':[5, 7, 7, 7, 7, 8, 8, 9, 9, 6, 6, 5, 5, 2, 2, 5]})
p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax'))
p += geom_rect(xmin=0, xmax=13, ymin=0, ymax=10)
p += geom_rect(colour="white", fill="white")
p += xlim(0, 13)
assert_same_ggplot(p, "geom_rect_inv")
@cleanup
def test_factor_geom_point():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_point(), "factor_geom_point")
@cleanup
def test_factor_geom_point_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line() + geom_point(), "factor_geom_point_line")
@cleanup
def test_factor_point_line_title_lab():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue') + ggtitle("Beef: It's What's for Dinner")
p = p + xlab("Date") + ylab("Head of Cattle Slaughtered")
assert_same_ggplot(p, "factor_complicated")
@cleanup
def test_labs():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue')
p = p + labs(title="Beef: It's What's for Dinner", x="Date", y="Head of Cattle Slaughtered")
assert_same_ggplot(p, "labs")
@cleanup
def test_factor_bar():
p = ggplot(aes(x='factor(cyl)'), data=mtcars)
assert_same_ggplot(p + geom_histogram(), "factor_geom_bar")
@cleanup
def test_stats_smooth():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", color="cat"), data=df)
gg = gg + stat_smooth(se=False) + ggtitle("My Smoothed Chart")
assert_same_ggplot(gg, "stat_smooth")
@cleanup
def test_stats_bin2d():
import matplotlib.pyplot as plt
if not hasattr(plt, "hist2d"):
import nose
raise nose.SkipTest("stat_bin2d only works with newer matplotlib (1.3) versions.")
df = _build_testing_df()
gg = ggplot(aes(x='x', y='y', shape='cat', color='cat2'), data=df)
assert_same_ggplot(gg + stat_bin2d(), "stat_bin2d")
@cleanup
def test_alpha_density():
gg = ggplot(aes(x='mpg'), data=mtcars)
assert_same_ggplot(gg + geom_density(fill=True, alpha=0.3), "geom_density_alpha")
@cleanup
def test_facet_wrap():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z'), data=df)
#assert_same_ggplot(gg + geom_bar() + facet_wrap(x="cat2"), "geom_bar_facet")
assert_same_ggplot(gg + geom_area() + facet_wrap(x="cat2"), "geom_area_facet")
@cleanup
def test_facet_wrap2():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x='date', y='value', colour='variable'), data=meat_lng)
assert_same_ggplot(p + geom_density(fill=True, alpha=0.3) + facet_wrap("variable"), "geom_density_facet")
assert_same_ggplot(p + geom_line(alpha=0.3) + facet_wrap("variable"), "geom_line_facet")
@cleanup
def test_facet_grid_exceptions():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x="date", y="value", colour="variable", shape="variable"), meat_lng)
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="variable", x="NOT_AVAILABLE"))
with assert_raises(Exception):
print(p + geom_point() + facet_grid(y="NOT_AVAILABLE", x="variable"))
@cleanup
def test_facet_grid():
# only use a small subset of the data to speedup tests
# N=53940 -> N=7916 and only 2x2 facets
_mask1 = (diamonds.cut == "Ideal") | (diamonds.cut == "Good")
_mask2 = (diamonds.clarity == "SI2") | (diamonds.clarity == "VS1")
_df = diamonds[_mask1 & _mask2]
p = ggplot(aes(x='x', y='y', colour='z'), data=_df)
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_big")
p = ggplot(aes(x='carat'), data=_df)
p = p + geom_density() + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_facet")
@cleanup
def test_smooth_se():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + stat_smooth(), "point_smooth_se")
assert_same_ggplot(p + stat_smooth(), "smooth_se")
@cleanup
def test_scale_xy_continous():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
p = p + geom_point() + scale_x_continuous("This is the X")
p = p + scale_y_continuous("Squared", limits=[0, 1500])
assert_same_ggplot(p, "scale1")
@cleanup
def test_ylim():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + ylim(0, 1500), "ylim")
@cleanup
def test_partial_limits() :
p = ggplot(diamonds, aes('carat', 'price'))
assert_same_ggplot(p + geom_point(alpha=1/20.) + xlim(high = 4) + ylim(0), "partial_limits")
@cleanup
def test_partial_limits_facet() :
p = ggplot(diamonds, aes('carat', 'price', color="clarity"))
p = p + geom_point(alpha=1/20.) + facet_wrap(x="cut", scales="free") + xlim(low=0) + ylim(low=0)
assert_same_ggplot(p, "partial_limits_facet")
@cleanup
def test_scale_date():
meat = _build_meat_df()
gg = ggplot(aes(x='date', y='beef'), data=meat) + geom_line()
assert_same_ggplot(gg+scale_x_date(labels="%Y-%m-%d"), "scale_date")
@cleanup
def test_diamond():
p = ggplot(aes(x='x', y='y', colour='z'), data=diamonds.head(4))
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_wrap("cut")
assert_same_ggplot(p, "diamonds_small")
def test_aes_positional_args():
result = aes("weight", "hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes("weight", "hp", "qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3, expected3)
def test_aes_keyword_args():
result = aes(x="weight", y="hp")
expected = {"x": "weight", "y": "hp"}
assert_equal(result, expected)
result3 = aes(x="weight", y="hp", color="qsec")
expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result3,expected3)
def test_aes_mixed_args():
result = aes("weight", "hp", color="qsec")
expected = {"x": "weight", "y": "hp", "color": "qsec"}
assert_equal(result, expected)
@cleanup
def test_scale_color_brewer() :
p = ggplot(diamonds, aes(x = "x", y="y"))
p = p + geom_line() + scale_color_brewer(type='qual', palette=2)
assert_same_ggplot(p, "scale_color_brewer")
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Event dispatch framework.
All objects that produce events in pyglet implement `EventDispatcher`,
providing a consistent interface for registering and manipulating event
handlers. A commonly used event dispatcher is `pyglet.window.Window`.
Event types
===========
For each event dispatcher there is a set of events that it dispatches; these
correspond with the type of event handlers you can attach. Event types are
identified by their name, for example, ``on_resize``. If you are creating a
new class which implements `EventDispatcher`, you must call
`EventDispatcher.register_event_type` for each event type.
Attaching event handlers
========================
An event handler is simply a function or method. You can attach an event
handler by setting the appropriate function on the instance::
def on_resize(width, height):
# ...
dispatcher.on_resize = on_resize
There is also a convenience decorator that reduces typing::
@dispatcher.event
def on_resize(width, height):
# ...
You may prefer to subclass and override the event handlers instead::
class MyDispatcher(DispatcherClass):
def on_resize(self, width, height):
# ...
Event handler stack
===================
When attaching an event handler to a dispatcher using the above methods, it
replaces any existing handler (causing the original handler to no longer be
called). Each dispatcher maintains a stack of event handlers, allowing you to
insert an event handler "above" the existing one rather than replacing it.
There are two main use cases for "pushing" event handlers:
* Temporarily intercepting the events coming from the dispatcher by pushing a
custom set of handlers onto the dispatcher, then later "popping" them all
off at once.
* Creating "chains" of event handlers, where the event propagates from the
top-most (most recently added) handler to the bottom, until a handler
takes care of it.
Use `EventDispatcher.push_handlers` to create a new level in the stack and
attach handlers to it. You can push several handlers at once::
dispatcher.push_handlers(on_resize, on_key_press)
If your function handlers have different names to the events they handle, use
keyword arguments::
dispatcher.push_handlers(on_resize=my_resize,
on_key_press=my_key_press)
After an event handler has processed an event, it is passed on to the
next-lowest event handler, unless the handler returns `EVENT_HANDLED`, which
prevents further propagation.
To remove all handlers on the top stack level, use
`EventDispatcher.pop_handlers`.
Note that any handlers pushed onto the stack have precedence over the
handlers set directly on the instance (for example, using the methods
described in the previous section), regardless of when they were set.
For example, handler ``foo`` is called before handler ``bar`` in the following
example::
dispatcher.push_handlers(on_resize=foo)
dispatcher.on_resize = bar
Dispatching events
==================
pyglet uses a single-threaded model for all application code. Event
handlers are only ever invoked as a result of calling
`EventDispatcher.dispatch_events`.
It is up to the specific event dispatcher to queue relevant events until they
can be dispatched, at which point the handlers are called in the order the
events were originally generated.
This implies that your application runs with a main loop that continuously
updates the application state and checks for new events::
while True:
dispatcher.dispatch_events()
# ... additional per-frame processing
Not all event dispatchers require the call to ``dispatch_events``; check with
the particular class documentation.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import inspect
EVENT_HANDLED = True
EVENT_UNHANDLED = None
class EventException(Exception):
'''An exception raised when an event handler could not be attached.
'''
pass
class EventDispatcher(object):
'''Generic event dispatcher interface.
See the module docstring for usage.
'''
# Placeholder empty stack; real stack is created only if needed
_event_stack = ()
@classmethod
def register_event_type(cls, name):
'''Register an event type with the dispatcher.
Registering event types allows the dispatcher to validate event
handler names as they are attached, and to search attached objects for
suitable handlers.
:Parameters:
`name` : str
Name of the event to register.
'''
if not hasattr(cls, 'event_types'):
cls.event_types = []
cls.event_types.append(name)
return name
def push_handlers(self, *args, **kwargs):
'''Push a level onto the top of the handler stack, then attach zero or
more event handlers.
If keyword arguments are given, they name the event type to attach.
Otherwise, a callable's `__name__` attribute will be used. Any other
object may also be specified, in which case it will be searched for
callables with event names.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = []
# Place dict full of new handlers at beginning of stack
self._event_stack.insert(0, {})
self.set_handlers(*args, **kwargs)
def _get_handlers(self, args, kwargs):
'''Implement handler matching on arguments for set_handlers and
remove_handlers.
'''
for object in args:
if inspect.isroutine(object):
# Single magically named function
name = object.__name__
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
yield name, object
else:
# Single instance with magically named methods
for name in dir(object):
if name in self.event_types:
yield name, getattr(object, name)
for name, handler in list(kwargs.items()):
# Function for handling given event (no magic)
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
yield name, handler
def set_handlers(self, *args, **kwargs):
'''Attach one or more event handlers to the top level of the handler
stack.
See `push_handlers` for the accepted argument types.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
for name, handler in self._get_handlers(args, kwargs):
self.set_handler(name, handler)
def set_handler(self, name, handler):
'''Attach a single event handler.
:Parameters:
`name` : str
Name of the event type to attach to.
`handler` : callable
Event handler to attach.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
self._event_stack[0][name] = handler
def pop_handlers(self):
'''Pop the top level of event handlers off the stack.
'''
        assert self._event_stack, 'No handlers pushed'
del self._event_stack[0]
def remove_handlers(self, *args, **kwargs):
'''Remove event handlers from the event stack.
See `push_handlers` for the accepted argument types. All handlers
are removed from the first stack frame that contains any of the given
handlers. No error is raised if any handler does not appear in that
frame, or if no stack frame contains any of the given handlers.
If the stack frame is empty after removing the handlers, it is
removed from the stack. Note that this interferes with the expected
symmetry of `push_handlers` and `pop_handlers`.
'''
handlers = list(self._get_handlers(args, kwargs))
# Find the first stack frame containing any of the handlers
def find_frame():
for frame in self._event_stack:
for name, handler in handlers:
try:
if frame[name] == handler:
return frame
except KeyError:
pass
frame = find_frame()
# No frame matched; no error.
if not frame:
return
# Remove each handler from the frame.
for name, handler in handlers:
try:
if frame[name] == handler:
del frame[name]
except KeyError:
pass
# Remove the frame if it's empty.
if not frame:
self._event_stack.remove(frame)
def remove_handler(self, name, handler):
'''Remove a single event handler.
The given event handler is removed from the first handler stack frame
it appears in. The handler must be the exact same callable as passed
to `set_handler`, `set_handlers` or `push_handlers`; and the name
must match the event type it is bound to.
No error is raised if the event handler is not set.
:Parameters:
`name` : str
Name of the event type to remove.
`handler` : callable
Event handler to remove.
'''
for frame in self._event_stack:
try:
if frame[name] == handler:
del frame[name]
break
except KeyError:
pass
def dispatch_event(self, event_type, *args):
'''Dispatch a single event to the attached handlers.
        The event is propagated to all handlers from the top of the stack
until one returns `EVENT_HANDLED`. This method should be used only by
`EventDispatcher` implementors; applications should call
the ``dispatch_events`` method.
Since pyglet 1.2, the method returns `EVENT_HANDLED` if an event
handler returned `EVENT_HANDLED` or `EVENT_UNHANDLED` if all events
returned `EVENT_UNHANDLED`. If no matching event handlers are in the
stack, ``False`` is returned.
:Parameters:
`event_type` : str
Name of the event.
`args` : sequence
Arguments to pass to the event handler.
:rtype: bool or None
:return: (Since pyglet 1.2) `EVENT_HANDLED` if an event handler
returned `EVENT_HANDLED`; `EVENT_UNHANDLED` if one or more event
handlers were invoked but returned only `EVENT_UNHANDLED`;
            otherwise ``False``. In pyglet 1.1 and earlier, the return value
is always ``None``.
'''
assert event_type in self.event_types, "%r not found in %r.event_types == %r" % (event_type, self, self.event_types)
invoked = False
# Search handler stack for matching event handlers
for frame in list(self._event_stack):
handler = frame.get(event_type, None)
if handler:
try:
invoked = True
if handler(*args):
return EVENT_HANDLED
except TypeError:
self._raise_dispatch_exception(event_type, args, handler)
# Check instance for an event handler
if hasattr(self, event_type):
try:
invoked = True
if getattr(self, event_type)(*args):
return EVENT_HANDLED
except TypeError:
self._raise_dispatch_exception(
event_type, args, getattr(self, event_type))
if invoked:
return EVENT_UNHANDLED
return False
def _raise_dispatch_exception(self, event_type, args, handler):
# A common problem in applications is having the wrong number of
# arguments in an event handler. This is caught as a TypeError in
# dispatch_event but the error message is obfuscated.
#
# Here we check if there is indeed a mismatch in argument count,
# and construct a more useful exception message if so. If this method
# doesn't find a problem with the number of arguments, the error
# is re-raised as if we weren't here.
n_args = len(args)
# Inspect the handler
handler_args, handler_varargs, _, handler_defaults = \
inspect.getargspec(handler)
n_handler_args = len(handler_args)
# Remove "self" arg from handler if it's a bound method
if inspect.ismethod(handler) and handler.__self__:
n_handler_args -= 1
# Allow *args varargs to overspecify arguments
if handler_varargs:
n_handler_args = max(n_handler_args, n_args)
# Allow default values to overspecify arguments
if (n_handler_args > n_args and
handler_defaults and
n_handler_args - len(handler_defaults) <= n_args):
n_handler_args = n_args
if n_handler_args != n_args:
if inspect.isfunction(handler) or inspect.ismethod(handler):
descr = '%s at %s:%d' % (
handler.__name__,
handler.__code__.co_filename,
handler.__code__.co_firstlineno)
else:
descr = repr(handler)
raise TypeError(
'%s event was dispatched with %d arguments, but '
'handler %s has an incompatible function signature' %
(event_type, len(args), descr))
else:
raise
def event(self, *args):
'''Function decorator for an event handler.
Usage::
win = window.Window()
@win.event
def on_resize(self, width, height):
# ...
or::
@win.event('on_resize')
def foo(self, width, height):
# ...
'''
if len(args) == 0: # @window.event()
def decorator(func):
name = func.__name__
self.set_handler(name, func)
return func
return decorator
elif inspect.isroutine(args[0]): # @window.event
func = args[0]
name = func.__name__
self.set_handler(name, func)
return args[0]
        elif isinstance(args[0], str):  # @window.event('on_resize')
name = args[0]
def decorator(func):
self.set_handler(name, func)
return func
return decorator
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import uuid
from oslo_config import cfg
from troveclient import exceptions as troveexc
from troveclient.v1 import users
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.clients.os import trove
from heat.engine import resource
from heat.engine.resources.openstack.trove import instance as dbinstance
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template as tmpl
from heat.tests import common
from heat.tests import utils
db_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "MySQL instance running on openstack DBaaS cloud",
"Resources" : {
"MySqlCloudDB": {
"Type": "OS::Trove::Instance",
"Properties" : {
"name" : "test",
"flavor" : "1GB",
"size" : 30,
"users" : [{"name": "testuser", "password": "pass", "databases":
["validdb"]}],
"databases" : [{"name": "validdb"}],
"datastore_type": "SomeDStype",
"datastore_version": "MariaDB-5.5"
}
}
}
}
'''
db_template_with_nics = '''
heat_template_version: 2013-05-23
description: MySQL instance running on openstack DBaaS cloud
resources:
MySqlCloudDB:
type: OS::Trove::Instance
properties:
name: test
flavor: 1GB
size: 30
networks:
- port: someportname
fixed_ip: 1.2.3.4
'''
db_template_with_replication = '''
heat_template_version: 2013-05-23
description: MySQL instance running on openstack DBaaS cloud
resources:
MySqlCloudDB:
type: OS::Trove::Instance
properties:
name: test
flavor: 1GB
size: 30
replica_of: 0e642916-dd64-43b3-933f-ff34fff69a7f
replica_count: 2
'''
class FakeDBInstance(object):
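    # Minimal stand-in for a troveclient instance: exposes the id, hostname,
    # links, resource_id and status attributes read by the resource code,
    # plus no-op delete()/to_dict() methods.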
def __init__(self):
self.id = 12345
self.hostname = "testhost"
self.links = [
{"href": "https://adga23dd432a.rackspacecloud.com/132345245",
"rel": "self"}]
self.resource_id = 12345
self.status = 'ACTIVE'
def delete(self):
pass
def to_dict(self):
pass
class FakeFlavor(object):
def __init__(self, id, name):
self.id = id
self.name = name
class FakeVersion(object):
def __init__(self, name="MariaDB-5.5"):
self.name = name
class InstanceTest(common.HeatTestCase):
def setUp(self):
super(InstanceTest, self).setUp()
self.fc = mock.MagicMock()
self.nova = mock.Mock()
self.client = mock.Mock()
self.patchobject(trove.TroveClientPlugin, '_create',
return_value=self.client)
self.stub_TroveFlavorConstraint_validate()
self.patchobject(resource.Resource, 'is_using_neutron',
return_value=True)
self.flavor_resolve = self.patchobject(trove.TroveClientPlugin,
'find_flavor_by_name_or_id',
return_value='1')
self.fake_instance = FakeDBInstance()
self.client.instances.create.return_value = self.fake_instance
self.client.instances.get.return_value = self.fake_instance
def _setup_test_instance(self, name, t, rsrc_name='MySqlCloudDB'):
stack_name = '%s_stack' % name
template = tmpl.Template(t)
self.stack = parser.Stack(utils.dummy_context(),
stack_name,
template,
stack_id=str(uuid.uuid4()))
rsrc = self.stack[rsrc_name]
rsrc.resource_id = '12345'
return rsrc
def _stubout_validate(self, instance, neutron=None,
mock_net_constraint=False,
with_port=True):
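        # Stub out the external lookups used by validate(): the datastore
        # version list, optionally the network and port constraints, and the
        # neutron detection on the instance under test.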
if mock_net_constraint:
self.stub_NetworkConstraint_validate()
self.client.datastore_versions.list.return_value = [FakeVersion()]
if neutron is not None:
instance.is_using_neutron = mock.Mock(return_value=bool(neutron))
if with_port:
self.stub_PortConstraint_validate()
def test_instance_create(self):
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_create', t)
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.assertEqual('instances', instance.entity)
def test_create_failed(self):
t = template_format.parse(db_template)
osdb_res = self._setup_test_instance('dbinstance_create', t)
trove_mock = mock.Mock()
self.patchobject(osdb_res, 'client', return_value=trove_mock)
# test for bad statuses
mock_input = mock.Mock()
mock_input.status = 'ERROR'
trove_mock.instances.get.return_value = mock_input
error_string = ('Went to status ERROR due to "The last operation for '
'the database instance failed due to an error."')
exc = self.assertRaises(exception.ResourceInError,
osdb_res.check_create_complete,
mock_input)
self.assertIn(error_string, str(exc))
mock_input = mock.Mock()
mock_input.status = 'FAILED'
trove_mock.instances.get.return_value = mock_input
error_string = ('Went to status FAILED due to "The database instance '
'was created, but heat failed to set up the '
'datastore. If a database instance is in the FAILED '
'state, it should be deleted and a new one should '
'be created."')
exc = self.assertRaises(exception.ResourceInError,
osdb_res.check_create_complete,
mock_input)
self.assertIn(error_string, str(exc))
# test if error string is not defined
osdb_res.TROVE_STATUS_REASON = {}
mock_input = mock.Mock()
mock_input.status = 'ERROR'
error_string = ('Went to status ERROR due to "Unknown"')
trove_mock.instances.get.return_value = mock_input
exc = self.assertRaises(exception.ResourceInError,
osdb_res.check_create_complete,
mock_input)
self.assertIn(error_string, str(exc))
def _create_failed_bad_status(self, status, error_message):
t = template_format.parse(db_template)
bad_instance = mock.Mock()
bad_instance.status = status
self.client.instances.get.return_value = bad_instance
instance = self._setup_test_instance('test_bad_statuses', t)
ex = self.assertRaises(exception.ResourceInError,
instance.check_create_complete,
self.fake_instance.id)
self.assertIn(error_message, str(ex))
def test_create_failed_status_error(self):
self._create_failed_bad_status(
'ERROR', 'Went to status ERROR due to "The last operation for '
'the database instance failed due to an error."')
def test_create_failed_status_failed(self):
self._create_failed_bad_status(
'FAILED', 'Went to status FAILED due to "The database instance '
'was created, but heat failed to set up the datastore. '
'If a database instance is in the FAILED state, it '
'should be deleted and a new one should be created."')
def test_instance_restore_point(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['restore_point'] = "1234"
instance = self._setup_test_instance('dbinstance_create', t)
self.client.flavors.get.side_effect = [troveexc.NotFound()]
self.client.flavors.find.return_value = FakeFlavor(1, '1GB')
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
users = [{"name": "testuser", "password": "pass", "host": "%",
"databases": [{"name": "validdb"}]}]
databases = [{"collate": "utf8_general_ci",
"character_set": "utf8",
"name": "validdb"}]
self.client.instances.create.assert_called_once_with(
'test', '1', volume={'size': 30}, databases=databases, users=users,
restorePoint={"backupRef": "1234"}, availability_zone=None,
datastore="SomeDStype", datastore_version="MariaDB-5.5", nics=[],
replica_of=None, replica_count=None)
def test_instance_create_overlimit(self):
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_create', t)
# Simulate an OverLimit exception
self.client.instances.get.side_effect = [
troveexc.RequestEntityTooLarge(), self.fake_instance]
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
def test_instance_create_fails(self):
cfg.CONF.set_override('action_retry_limit', 0)
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_create', t)
self.fake_instance.status = 'ERROR'
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(instance.create))
# return previous status
self.fake_instance.status = 'ACTIVE'
def _get_db_instance(self):
t = template_format.parse(db_template)
res = self._setup_test_instance('trove_check', t)
res.state_set(res.CREATE, res.COMPLETE)
res.flavor = 'Foo Flavor'
res.volume = 'Foo Volume'
res.datastore_type = 'Foo Type'
res.datastore_version = 'Foo Version'
return res
def test_instance_check(self):
res = self._get_db_instance()
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
def test_instance_check_not_active(self):
res = self._get_db_instance()
self.fake_instance.status = 'FOOBAR'
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
self.assertIn('FOOBAR', str(exc))
self.assertEqual((res.CHECK, res.FAILED), res.state)
# return previous status
self.fake_instance.status = 'ACTIVE'
def test_instance_delete(self):
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_del', t)
self.client.instances.get.side_effect = [self.fake_instance,
troveexc.NotFound(404)]
scheduler.TaskRunner(instance.create)()
scheduler.TaskRunner(instance.delete)()
def test_instance_delete_overlimit(self):
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_del', t)
# Simulate an OverLimit exception
self.client.instances.get.side_effect = [
troveexc.RequestEntityTooLarge(), self.fake_instance,
troveexc.NotFound(404)]
scheduler.TaskRunner(instance.create)()
scheduler.TaskRunner(instance.delete)()
def test_instance_delete_resource_none(self):
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_del', t)
scheduler.TaskRunner(instance.create)()
instance.resource_id = None
scheduler.TaskRunner(instance.delete)()
self.assertIsNone(instance.resource_id)
def test_instance_resource_not_found(self):
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_del', t)
self.client.instances.get.side_effect = [self.fake_instance,
troveexc.NotFound(404)]
scheduler.TaskRunner(instance.create)()
scheduler.TaskRunner(instance.delete)()
def test_instance_attributes(self):
fake_instance = FakeDBInstance()
self.client.instances.create.return_value = fake_instance
self.client.instances.get.return_value = fake_instance
t = template_format.parse(db_template)
instance = self._setup_test_instance('attr_test', t)
self.assertEqual("testhost", instance.FnGetAtt('hostname'))
self.assertEqual("https://adga23dd432a.rackspacecloud.com/132345245",
instance.FnGetAtt('href'))
def test_instance_validation_success(self):
t = template_format.parse(db_template)
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance)
self.assertIsNone(instance.validate())
def test_instance_validation_invalid_db(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['databases'] = [
{"name": "onedb"}]
t['Resources']['MySqlCloudDB']['Properties']['users'] = [
{"name": "testuser",
"password": "pass",
"databases": ["invaliddb"]}]
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance)
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
self.assertEqual("Database ['invaliddb'] specified for user does not "
"exist in databases for resource MySqlCloudDB.",
str(ex))
def test_instance_validation_db_name_hyphens(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['databases'] = [
{"name": "-foo-bar-"}]
t['Resources']['MySqlCloudDB']['Properties']['users'] = [
{"name": "testuser",
"password": "pass",
"databases": ["-foo-bar-"]}]
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance)
self.assertIsNone(instance.validate())
def test_instance_validation_users_none(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['users'] = []
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance)
self.assertIsNone(instance.validate())
def test_instance_validation_databases_none(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['databases'] = []
t['Resources']['MySqlCloudDB']['Properties']['users'] = [
{"name": "testuser",
"password": "pass",
"databases": ["invaliddb"]}]
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance)
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
self.assertEqual('Databases property is required if users property '
'is provided for resource MySqlCloudDB.',
str(ex))
def test_instance_validation_user_no_db(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['databases'] = [
{"name": "validdb"}]
t['Resources']['MySqlCloudDB']['Properties']['users'] = [
{"name": "testuser", "password": "pass", "databases": []}]
instance = self._setup_test_instance('dbinstance_test', t)
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
self.assertEqual('Property error: '
'Resources.MySqlCloudDB.Properties.'
'users[0].databases: length (0) is out of range '
'(min: 1, max: None)', str(ex))
def test_instance_validation_no_datastore_yes_version(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'].pop('datastore_type')
instance = self._setup_test_instance('dbinstance_test', t)
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
exp_msg = "Not allowed - datastore_version without datastore_type."
self.assertEqual(exp_msg, str(ex))
def test_instance_validation_no_ds_version(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_type'] = 'mysql'
t['Resources']['MySqlCloudDB']['Properties'].pop('datastore_version')
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance)
self.assertIsNone(instance.validate())
def test_instance_validation_wrong_dsversion(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_type'] = 'mysql'
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_version'] = 'SomeVersion'
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance)
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
expected_msg = ("Datastore version SomeVersion for datastore type "
"mysql is not valid. "
"Allowed versions are MariaDB-5.5.")
self.assertEqual(expected_msg, str(ex))
def test_instance_validation_implicit_version(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties'][
'datastore_type'] = 'mysql'
t['Resources']['MySqlCloudDB']['Properties'].pop('datastore_version')
instance = self._setup_test_instance('dbinstance_test', t)
self.client.datastore_versions.list.return_value = [
FakeVersion(), FakeVersion('MariaDB-5.0')]
self.assertIsNone(instance.validate())
def test_instance_validation_net_with_port_fail(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['networks'] = [
{
"port": "someportuuid",
"network": "somenetuuid"
}]
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance, neutron=True,
mock_net_constraint=True)
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Either network or port must be provided.',
str(ex))
def test_instance_validation_no_net_no_port_fail(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['networks'] = [
{
"fixed_ip": "1.2.3.4"
}]
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance, neutron=True, with_port=False)
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Either network or port must be provided.',
str(ex))
def test_instance_validation_nic_port_on_novanet_fails(self):
t = template_format.parse(db_template)
t['Resources']['MySqlCloudDB']['Properties']['networks'] = [
{
"port": "someportuuid",
}]
instance = self._setup_test_instance('dbinstance_test', t)
self._stubout_validate(instance, neutron=False)
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Can not use port property on Nova-network.',
str(ex))
def test_instance_create_with_port(self):
t = template_format.parse(db_template_with_nics)
instance = self._setup_test_instance('dbinstance_test', t)
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value='someportid')
self.stub_PortConstraint_validate()
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.client.instances.create.assert_called_once_with(
'test', '1', volume={'size': 30}, databases=[], users=[],
restorePoint=None, availability_zone=None, datastore=None,
datastore_version=None, nics=[{'port-id': 'someportid',
'v4-fixed-ip': '1.2.3.4'}],
replica_of=None, replica_count=None)
def test_instance_create_with_net_id(self):
net_id = '034aa4d5-0f36-4127-8481-5caa5bfc9403'
t = template_format.parse(db_template_with_nics)
t['resources']['MySqlCloudDB']['properties']['networks'] = [
{'network': net_id}]
instance = self._setup_test_instance('dbinstance_test', t)
self.stub_NetworkConstraint_validate()
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value=net_id)
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.client.instances.create.assert_called_once_with(
'test', '1', volume={'size': 30}, databases=[], users=[],
restorePoint=None, availability_zone=None, datastore=None,
datastore_version=None, nics=[{'net-id': net_id}], replica_of=None,
replica_count=None)
def test_instance_create_with_replication(self):
t = template_format.parse(db_template_with_replication)
instance = self._setup_test_instance('dbinstance_test', t)
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
self.client.instances.create.assert_called_once_with(
'test', '1', volume={'size': 30}, databases=[], users=[],
restorePoint=None, availability_zone=None, datastore=None,
datastore_version=None, nics=[],
replica_of="0e642916-dd64-43b3-933f-ff34fff69a7f", replica_count=2)
def test_instance_get_live_state(self):
self.fake_instance.to_dict = mock.Mock(return_value={
'name': 'test_instance',
'flavor': {'id': '1'},
'volume': {'size': 30}
})
fake_db1 = mock.Mock()
fake_db1.name = 'validdb'
fake_db2 = mock.Mock()
fake_db2.name = 'secondvaliddb'
self.client.databases.list.return_value = [fake_db1, fake_db2]
expected = {
'flavor': '1',
'name': 'test_instance',
'size': 30,
'databases': [{'name': 'validdb',
'character_set': 'utf8',
'collate': 'utf8_general_ci'},
{'name': 'secondvaliddb'}]
}
t = template_format.parse(db_template)
instance = self._setup_test_instance('get_live_state_test', t)
reality = instance.get_live_state(instance.properties)
self.assertEqual(expected, reality)
@mock.patch.object(resource.Resource, "client_plugin")
@mock.patch.object(resource.Resource, "client")
class InstanceUpdateTests(common.HeatTestCase):
def setUp(self):
super(InstanceUpdateTests, self).setUp()
self._stack = utils.parse_stack(template_format.parse(db_template))
testprops = {
"name": "testinstance",
"flavor": "foo",
"datastore_type": "database",
"datastore_version": "1",
"size": 10,
"databases": [
{"name": "bar"},
{"name": "biff"}
],
"users": [
{
"name": "baz",
"password": "password",
"databases": ["bar"]
},
{
"name": "deleted",
"password": "password",
"databases": ["biff"]
}
]
}
self._rdef = rsrc_defn.ResourceDefinition('test',
dbinstance.Instance,
properties=testprops)
def test_handle_no_update(self, mock_client, mock_plugin):
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertEqual({}, trove.handle_update(None, None, {}))
def test_handle_update_name(self, mock_client, mock_plugin):
prop_diff = {
"name": "changed"
}
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertEqual(prop_diff, trove.handle_update(None, None, prop_diff))
def test_handle_update_databases(self, mock_client, mock_plugin):
prop_diff = {
"databases": [
{"name": "bar",
"character_set": "ascii"},
{'name': "baz"}
]
}
mget = mock_client().databases.list
mbar = mock.Mock(name='bar')
mbar.name = 'bar'
mbiff = mock.Mock(name='biff')
mbiff.name = 'biff'
mget.return_value = [mbar, mbiff]
trove = dbinstance.Instance('test', self._rdef, self._stack)
expected = {
'databases': [
{'character_set': 'ascii', 'name': 'bar'},
{'ACTION': 'CREATE', 'name': 'baz'},
{'ACTION': 'DELETE', 'name': 'biff'}
]}
self.assertEqual(expected, trove.handle_update(None, None, prop_diff))
def test_handle_update_users(self, mock_client, mock_plugin):
prop_diff = {
"users": [
{"name": "baz",
"password": "changed",
"databases": ["bar", "biff"]},
{'name': "user2",
"password": "password",
"databases": ["biff", "bar"]}
]
}
uget = mock_client().users
mbaz = mock.Mock(name='baz')
mbaz.name = 'baz'
mdel = mock.Mock(name='deleted')
mdel.name = 'deleted'
uget.list.return_value = [mbaz, mdel]
trove = dbinstance.Instance('test', self._rdef, self._stack)
expected = {
'users': [{
'databases': ['bar', 'biff'],
'name': 'baz',
'password': 'changed'
}, {
'ACTION': 'CREATE',
'databases': ['biff', 'bar'],
'name': 'user2',
'password': 'password'
}, {
'ACTION': 'DELETE',
'name': 'deleted'
}]}
self.assertEqual(expected, trove.handle_update(None, None, prop_diff))
def test_handle_update_flavor(self, mock_client, mock_plugin):
# Translation mechanism already resolved flavor name to id.
prop_diff = {
"flavor": 1234
}
trove = dbinstance.Instance('test', self._rdef, self._stack)
expected = {
"flavor": 1234
}
self.assertEqual(expected, trove.handle_update(None, None, prop_diff))
def test_handle_update_size(self, mock_client, mock_plugin):
prop_diff = {
"size": 42
}
trove = dbinstance.Instance('test', self._rdef, self._stack)
expected = {
"size": 42
}
self.assertEqual(expected, trove.handle_update(None, None, prop_diff))
def test_check_complete_none(self, mock_client, mock_plugin):
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertTrue(trove.check_update_complete({}))
def test_check_complete_error(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ERROR")
mock_client().instances.get.return_value = mock_instance
trove = dbinstance.Instance('test', self._rdef, self._stack)
exc = self.assertRaises(exception.ResourceInError,
trove.check_update_complete,
{"foo": "bar"})
msg = "The last operation for the database instance failed"
self.assertIn(msg, str(exc))
def test_check_client_exceptions(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ACTIVE")
mock_client().instances.get.return_value = mock_instance
mock_plugin().is_client_exception.return_value = True
mock_plugin().is_over_limit.side_effect = [True, False]
trove = dbinstance.Instance('test', self._rdef, self._stack)
with mock.patch.object(trove, "_update_flavor") as mupdate:
mupdate.side_effect = [Exception("test"),
Exception("No change was requested "
"because I'm testing")]
self.assertFalse(trove.check_update_complete({"foo": "bar"}))
self.assertFalse(trove.check_update_complete({"foo": "bar"}))
self.assertEqual(2, mupdate.call_count)
self.assertEqual(2, mock_plugin().is_client_exception.call_count)
def test_check_complete_status(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="RESIZING")
mock_client().instances.get.return_value = mock_instance
updates = {"foo": "bar"}
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertFalse(trove.check_update_complete(updates))
def test_check_complete_name(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ACTIVE", name="mock_instance")
mock_client().instances.get.return_value = mock_instance
updates = {"name": "changed"}
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertFalse(trove.check_update_complete(updates))
mock_instance.name = "changed"
self.assertTrue(trove.check_update_complete(updates))
mock_client().instances.edit.assert_called_once_with(mock_instance,
name="changed")
def test_check_complete_databases(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ACTIVE", name="mock_instance")
mock_client().instances.get.return_value = mock_instance
updates = {
'databases': [
{'name': 'bar', "character_set": "ascii"},
{'ACTION': 'CREATE', 'name': 'baz'},
{'ACTION': 'DELETE', 'name': 'biff'}
]}
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertTrue(trove.check_update_complete(updates))
mcreate = mock_client().databases.create
mdelete = mock_client().databases.delete
mcreate.assert_called_once_with(mock_instance, [{'name': 'baz'}])
mdelete.assert_called_once_with(mock_instance, 'biff')
def test_check_complete_users(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ACTIVE", name="mock_instance")
mock_client().instances.get.return_value = mock_instance
mock_plugin().is_client_exception.return_value = False
mock_client().users.get.return_value = users.User(None, {
"databases": [{
"name": "bar"
}, {
"name": "buzz"
}],
"name": "baz"
}, loaded=True)
updates = {
'users': [{
'databases': ['bar', 'biff'],
'name': 'baz',
'password': 'changed'
}, {
'ACTION': 'CREATE',
'databases': ['biff', 'bar'],
'name': 'user2',
'password': 'password'
}, {
'ACTION': 'DELETE',
'name': 'deleted'
}]}
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertTrue(trove.check_update_complete(updates))
create_calls = [
mock.call(mock_instance, [{'password': 'password',
'databases': [{'name': 'biff'},
{'name': 'bar'}],
'name': 'user2'}])
]
delete_calls = [
mock.call(mock_instance, 'deleted')
]
mock_client().users.create.assert_has_calls(create_calls)
mock_client().users.delete.assert_has_calls(delete_calls)
self.assertEqual(1, mock_client().users.create.call_count)
self.assertEqual(1, mock_client().users.delete.call_count)
updateattr = mock_client().users.update_attributes
updateattr.assert_called_once_with(
mock_instance, 'baz', newuserattr={'password': 'changed'},
hostname=mock.ANY)
mock_client().users.grant.assert_called_once_with(
mock_instance, 'baz', ['biff'])
mock_client().users.revoke.assert_called_once_with(
mock_instance, 'baz', ['buzz'])
def test_check_complete_flavor(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ACTIVE", flavor={'id': 4567},
name="mock_instance")
mock_client().instances.get.return_value = mock_instance
updates = {
"flavor": 1234
}
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertFalse(trove.check_update_complete(updates))
mock_instance.status = "RESIZING"
self.assertFalse(trove.check_update_complete(updates))
mock_instance.status = "ACTIVE"
mock_instance.flavor = {'id': 1234}
self.assertTrue(trove.check_update_complete(updates))
def test_check_complete_size(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ACTIVE", volume={'size': 24},
name="mock_instance")
mock_client().instances.get.return_value = mock_instance
updates = {
"size": 42
}
trove = dbinstance.Instance('test', self._rdef, self._stack)
self.assertFalse(trove.check_update_complete(updates))
mock_instance.status = "RESIZING"
self.assertFalse(trove.check_update_complete(updates))
mock_instance.status = "ACTIVE"
mock_instance.volume = {'size': 42}
self.assertTrue(trove.check_update_complete(updates))
|
|
# -*- coding: utf-8 -*-
import numpy as np
from allel.util import asarray_ndim, check_integer_dtype
def _check_dac_n(dac, n):
dac = asarray_ndim(dac, 1)
check_integer_dtype(dac)
mx = np.max(dac)
if n is None:
n = mx
elif n < mx:
raise ValueError('number of chromosomes too small; expected {}, found {}'
.format(n, mx))
return dac, int(n)
def _check_ac_n(ac, n):
ac = asarray_ndim(ac, 2)
if ac.shape[1] != 2:
raise ValueError('only biallelic variants are supported')
check_integer_dtype(ac)
mx = np.max(np.sum(ac, axis=1))
if n is None:
n = mx
elif n < mx:
raise ValueError('number of chromosomes too small; expected {}, found {}'
.format(n, mx))
return ac, int(n)
def sfs(dac, n=None):
"""Compute the site frequency spectrum given derived allele counts at
a set of biallelic variants.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs : ndarray, int, shape (n_chromosomes,)
Array where the kth element is the number of variant sites with k
derived alleles.
"""
# check input
dac, n = _check_dac_n(dac, n)
# need platform integer for bincount
dac = dac.astype(int, copy=False)
# compute site frequency spectrum
x = n + 1
s = np.bincount(dac, minlength=x)
return s
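# A minimal worked example (illustrative values, not from the original module):
# with derived allele counts [1, 1, 2] over n=4 chromosomes, sfs() simply
# bincounts the array out to length n + 1.
# >>> sfs(np.array([1, 1, 2]), n=4)
# array([0, 2, 1, 0, 0])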
def sfs_folded(ac, n=None):
"""Compute the folded site frequency spectrum given reference and
alternate allele counts at a set of biallelic variants.
Parameters
----------
ac : array_like, int, shape (n_variants, 2)
Allele counts array.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs_folded : ndarray, int, shape (n_chromosomes//2,)
Array where the kth element is the number of variant sites with a
minor allele count of k.
"""
# check input
ac, n = _check_ac_n(ac, n)
# compute minor allele counts
mac = np.amin(ac, axis=1)
# need platform integer for bincount
mac = mac.astype(int, copy=False)
# compute folded site frequency spectrum
x = n//2 + 1
s = np.bincount(mac, minlength=x)
return s
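# Illustrative example (assumed input, not from the original module): for
# ac = [[3, 1], [2, 2], [4, 0]] with n=4, the minor allele counts are
# [1, 2, 0], so the folded spectrum has length n//2 + 1 = 3.
# >>> sfs_folded(np.array([[3, 1], [2, 2], [4, 0]]), n=4)
# array([1, 1, 1])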
def sfs_scaled(dac, n=None):
"""Compute the site frequency spectrum scaled such that a constant value is
expected across the spectrum for neutral variation and constant
population size.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs_scaled : ndarray, int, shape (n_chromosomes,)
An array where the value of the kth element is the number of variants
with k derived alleles, multiplied by k.
"""
# compute site frequency spectrum
s = sfs(dac, n=n)
# apply scaling
s = scale_sfs(s)
return s
def scale_sfs(s):
"""Scale a site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum.
Returns
-------
sfs_scaled : ndarray, int, shape (n_chromosomes,)
Scaled site frequency spectrum.
"""
k = np.arange(s.size)
out = s * k
return out
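# Worked example (illustrative): scaling multiplies the kth bin by k, so
# scale_sfs(np.array([5, 3, 2])) gives array([0, 3, 4]).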
def sfs_folded_scaled(ac, n=None):
"""Compute the folded site frequency spectrum scaled such that a constant
value is expected across the spectrum for neutral variation and constant
population size.
Parameters
----------
ac : array_like, int, shape (n_variants, 2)
Allele counts array.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
An array where the value of the kth element is the number of variants
with minor allele count k, multiplied by the scaling factor
(k * (n - k) / n).
"""
# check input
ac, n = _check_ac_n(ac, n)
# compute the site frequency spectrum
s = sfs_folded(ac, n=n)
# apply scaling
s = scale_sfs_folded(s, n)
return s
def scale_sfs_folded(s, n):
"""Scale a folded site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes//2,)
Folded site frequency spectrum.
n : int
Number of chromosomes called.
Returns
-------
sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
Scaled folded site frequency spectrum.
"""
k = np.arange(s.shape[0])
out = s * k * (n - k) / n
return out
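# Worked example (illustrative): for s = [4, 2] and n = 4, each bin k is
# multiplied by k * (n - k) / n, giving [4*0*4/4, 2*1*3/4] == [0.0, 1.5].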
def joint_sfs(dac1, dac2, n1=None, n2=None):
"""Compute the joint site frequency spectrum between two populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs : ndarray, int, shape (n1 + 1, n2 + 1)
Array where the (i, j)th element is the number of variant sites with i
derived alleles in the first population and j derived alleles in the
second population.
"""
# check inputs
dac1, n1 = _check_dac_n(dac1, n1)
dac2, n2 = _check_dac_n(dac2, n2)
# compute site frequency spectrum
x = n1 + 1
y = n2 + 1
# need platform integer for bincount
tmp = (dac1 * y + dac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)
return s
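# Worked example (illustrative): with dac1 = [1, 2], dac2 = [0, 1] and
# n1 = n2 = 2, the flattened index dac1 * (n2 + 1) + dac2 is [3, 7], so the
# resulting 3x3 spectrum has ones at positions (1, 0) and (2, 1) and zeros
# elsewhere.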
def joint_sfs_folded(ac1, ac2, n1=None, n2=None):
"""Compute the joint folded site frequency spectrum between two
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the number of variant sites with a
minor allele count of i in the first population and j in the second
population.
"""
# check inputs
ac1, n1 = _check_ac_n(ac1, n1)
ac2, n2 = _check_ac_n(ac2, n2)
# compute minor allele counts
mac1 = np.amin(ac1, axis=1)
mac2 = np.amin(ac2, axis=1)
# compute site frequency spectrum
x = n1//2 + 1
y = n2//2 + 1
tmp = (mac1 * y + mac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)
return s
def joint_sfs_scaled(dac1, dac2, n1=None, n2=None):
"""Compute the joint site frequency spectrum between two populations,
scaled such that a constant value is expected across the spectrum for
neutral variation, constant population size and unrelated populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1)
Array where the (i, j)th element is the scaled frequency of variant
sites with i derived alleles in the first population and j derived
alleles in the second population.
"""
# compute site frequency spectrum
s = joint_sfs(dac1, dac2, n1=n1, n2=n2)
# apply scaling
s = scale_joint_sfs(s)
return s
def scale_joint_sfs(s):
"""Scale a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n1, n2)
Joint site frequency spectrum.
Returns
-------
joint_sfs_scaled : ndarray, int, shape (n1, n2)
Scaled joint site frequency spectrum.
"""
i = np.arange(s.shape[0])[:, None]
j = np.arange(s.shape[1])[None, :]
out = (s * i) * j
return out
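# Worked example (illustrative): each element (i, j) is multiplied by i * j,
# so scale_joint_sfs(np.array([[1, 2], [3, 4]])) gives [[0, 0], [0, 4]].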
def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None):
"""Compute the joint folded site frequency spectrum between two
populations, scaled such that a constant value is expected across the
spectrum for neutral variation, constant population size and unrelated
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the scaled frequency of variant
sites with a minor allele count of i in the first population and j
in the second population.
""" # noqa
# check inputs
ac1, n1 = _check_ac_n(ac1, n1)
ac2, n2 = _check_ac_n(ac2, n2)
# compute site frequency spectrum
s = joint_sfs_folded(ac1, ac2, n1=n1, n2=n2)
# apply scaling
s = scale_joint_sfs_folded(s, n1, n2)
return s
def scale_joint_sfs_folded(s, n1, n2):
"""Scale a folded joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2)
Folded joint site frequency spectrum.
n1, n2 : int
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2)
Scaled folded joint site frequency spectrum.
""" # noqa
out = np.empty_like(s)
for i in range(s.shape[0]):
for j in range(s.shape[1]):
out[i, j] = s[i, j] * i * j * (n1 - i) * (n2 - j)
return out
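# Worked example (illustrative): each element (i, j) is multiplied by
# i * j * (n1 - i) * (n2 - j); for s = [[1, 2], [3, 4]] with n1 = n2 = 4 the
# only non-zero entry is (1, 1): 4 * 1 * 1 * 3 * 3 == 36.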
def fold_sfs(s, n):
"""Fold a site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum
n : int
Total number of chromosomes called.
Returns
-------
sfs_folded : ndarray, int
Folded site frequency spectrum
"""
# check inputs
s = asarray_ndim(s, 1)
assert s.shape[0] <= n + 1, 'invalid number of chromosomes'
# need to check s has all entries up to n
if s.shape[0] < n + 1:
sn = np.zeros(n + 1, dtype=s.dtype)
sn[:s.shape[0]] = s
s = sn
# fold
nf = (n + 1) // 2
n = nf * 2
o = s[:nf] + s[nf:n][::-1]
return o
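# Worked example (illustrative): fold_sfs(np.array([10, 4, 2, 1]), n=3) pairs
# count k with count n - k, giving [10 + 1, 4 + 2] == [11, 6].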
def fold_joint_sfs(s, n1, n2):
"""Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum.
"""
# check inputs
s = asarray_ndim(s, 2)
assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'
# need to check s has all entries up to m
if s.shape[0] < n1 + 1:
sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
sm[:s.shape[0]] = s
s = sm
# need to check s has all entries up to n
if s.shape[1] < n2 + 1:
sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
sn[:, :s.shape[1]] = s
s = sn
# fold
mf = (n1 + 1) // 2
nf = (n2 + 1) // 2
n1 = mf * 2
n2 = nf * 2
o = (s[:mf, :nf] + # top left
s[mf:n1, :nf][::-1] + # top right
s[:mf, nf:n2][:, ::-1] + # bottom left
s[mf:n1, nf:n2][::-1, ::-1]) # bottom right
return o
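# Worked example (illustrative): folding sums the four reflected quadrants.
# For s = np.arange(16).reshape(4, 4) with n1 = n2 = 3, every cell of the
# folded 2x2 result equals 30, and the total (120) is preserved.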
def plot_sfs(s, yscale='log', bins=None, n=None,
clip_endpoints=True, label=None, plot_kwargs=None,
ax=None):
"""Plot a site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
import matplotlib.pyplot as plt
import scipy.stats
# check inputs
s = asarray_ndim(s, 1)
# setup axes
if ax is None:
fig, ax = plt.subplots()
# setup data
if bins is None:
if clip_endpoints:
x = np.arange(1, s.shape[0]-1)
y = s[1:-1]
else:
x = np.arange(s.shape[0])
y = s
else:
if clip_endpoints:
y, b, _ = scipy.stats.binned_statistic(
np.arange(1, s.shape[0]-1),
values=s[1:-1],
bins=bins,
statistic='sum')
else:
y, b, _ = scipy.stats.binned_statistic(
np.arange(s.shape[0]),
values=s,
bins=bins,
statistic='sum')
# use bin midpoints for plotting
x = (b[:-1] + b[1:]) / 2
if n:
# convert allele counts to allele frequencies
x = x / n
ax.set_xlabel('derived allele frequency')
else:
ax.set_xlabel('derived allele count')
# do plotting
if plot_kwargs is None:
plot_kwargs = dict()
ax.plot(x, y, label=label, **plot_kwargs)
# tidy
ax.set_yscale(yscale)
ax.set_ylabel('site frequency')
ax.autoscale(axis='x', tight=True)
return ax
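# Usage sketch (hypothetical data, matplotlib required): given a spectrum
# computed with sfs() above, something like the following bins counts into 20
# allele-frequency bins; 'dac' and n=200 are assumed example inputs.
# >>> s = sfs(dac, n=200)
# >>> ax = plot_sfs(s, n=200, bins=20)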
# noinspection PyIncorrectDocstring
def plot_sfs_folded(*args, **kwargs):
"""Plot a folded site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
ax = plot_sfs(*args, **kwargs)
n = kwargs.get('n', None)
if n:
ax.set_xlabel('minor allele frequency')
else:
ax.set_xlabel('minor allele count')
return ax
# noinspection PyIncorrectDocstring
def plot_sfs_scaled(*args, **kwargs):
"""Plot a scaled site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
kwargs.setdefault('yscale', 'linear')
ax = plot_sfs(*args, **kwargs)
ax.set_ylabel('scaled site frequency')
return ax
# noinspection PyIncorrectDocstring
def plot_sfs_folded_scaled(*args, **kwargs):
"""Plot a folded scaled site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
kwargs.setdefault('yscale', 'linear')
ax = plot_sfs_folded(*args, **kwargs)
ax.set_ylabel('scaled site frequency')
n = kwargs.get('n', None)
if n:
ax.set_xlabel('minor allele frequency')
else:
ax.set_xlabel('minor allele count')
return ax
def plot_joint_sfs(s, ax=None, imshow_kwargs=None):
"""Plot a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
Joint site frequency spectrum.
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
imshow_kwargs : dict-like
Additional keyword arguments, passed through to ax.imshow().
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# check inputs
s = asarray_ndim(s, 2)
# setup axes
if ax is None:
w = plt.rcParams['figure.figsize'][0]
fig, ax = plt.subplots(figsize=(w, w))
# set plotting defaults
if imshow_kwargs is None:
imshow_kwargs = dict()
imshow_kwargs.setdefault('cmap', 'jet')
imshow_kwargs.setdefault('interpolation', 'none')
imshow_kwargs.setdefault('aspect', 'auto')
imshow_kwargs.setdefault('norm', LogNorm())
# plot data
ax.imshow(s.T, **imshow_kwargs)
# tidy
ax.invert_yaxis()
ax.set_xlabel('derived allele count (population 1)')
ax.set_ylabel('derived allele count (population 2)')
return ax
# noinspection PyIncorrectDocstring
def plot_joint_sfs_folded(*args, **kwargs):
"""Plot a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2)
Joint site frequency spectrum.
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
imshow_kwargs : dict-like
Additional keyword arguments, passed through to ax.imshow().
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
ax = plot_joint_sfs(*args, **kwargs)
ax.set_xlabel('minor allele count (population 1)')
ax.set_ylabel('minor allele count (population 2)')
return ax
# noinspection PyIncorrectDocstring
def plot_joint_sfs_scaled(*args, **kwargs):
"""Plot a scaled joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
Joint site frequency spectrum.
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
imshow_kwargs : dict-like
Additional keyword arguments, passed through to ax.imshow().
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
imshow_kwargs = kwargs.get('imshow_kwargs', dict())
imshow_kwargs.setdefault('norm', None)
kwargs['imshow_kwargs'] = imshow_kwargs
ax = plot_joint_sfs(*args, **kwargs)
return ax
# noinspection PyIncorrectDocstring
def plot_joint_sfs_folded_scaled(*args, **kwargs):
"""Plot a scaled folded joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2)
Joint site frequency spectrum.
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
imshow_kwargs : dict-like
Additional keyword arguments, passed through to ax.imshow().
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
imshow_kwargs = kwargs.get('imshow_kwargs', dict())
imshow_kwargs.setdefault('norm', None)
kwargs['imshow_kwargs'] = imshow_kwargs
ax = plot_joint_sfs_folded(*args, **kwargs)
ax.set_xlabel('minor allele count (population 1)')
ax.set_ylabel('minor allele count (population 2)')
return ax
|
|
from __future__ import division, print_function, absolute_import
from ...utils.six.moves import xrange
import numpy as np
import nose
from dipy.io.bvectxt import orientation_from_string
from dipy.tracking.utils import (affine_for_trackvis, connectivity_matrix,
density_map, length, move_streamlines,
ndbincount, reduce_labels,
reorder_voxels_affine, seeds_from_mask,
random_seeds_from_mask, target,
_rmi, unique_rows, near_roi,
reduce_rois)
import dipy.tracking.metrics as metrix
from dipy.tracking.vox2track import streamline_mapping
import numpy.testing as npt
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_raises, assert_true
def make_streamlines():
streamlines = [np.array([[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[5, 10, 12]], 'float'),
np.array([[1, 2, 3],
[3, 2, 0],
[5, 20, 33],
[40, 80, 120]], 'float')]
return streamlines
def test_density_map():
# One streamline diagonal in volume
streamlines = [np.array([np.arange(10)]*3).T]
shape = (10, 10, 10)
x = np.arange(10)
expected = np.zeros(shape)
expected[x, x, x] = 1.
dm = density_map(streamlines, vol_dims=shape, voxel_size=(1, 1, 1))
assert_array_equal(dm, expected)
# add streamline, make voxel_size smaller. Each streamline should only be
# counted once, even if multiple points lie in a voxel
streamlines.append(np.ones((5, 3)))
shape = (5, 5, 5)
x = np.arange(5)
expected = np.zeros(shape)
expected[x, x, x] = 1.
expected[0, 0, 0] += 1
dm = density_map(streamlines, vol_dims=shape, voxel_size=(2, 2, 2))
assert_array_equal(dm, expected)
# should work with a generator
dm = density_map(iter(streamlines), vol_dims=shape, voxel_size=(2, 2, 2))
assert_array_equal(dm, expected)
# Test passing affine
affine = np.diag([2, 2, 2, 1.])
affine[:3, 3] = 1.
dm = density_map(streamlines, shape, affine=affine)
assert_array_equal(dm, expected)
# Shift the image by 2 voxels, ie 4mm
affine[:3, 3] -= 4.
expected_old = expected
new_shape = [i + 2 for i in shape]
expected = np.zeros(new_shape)
expected[2:, 2:, 2:] = expected_old
dm = density_map(streamlines, new_shape, affine=affine)
assert_array_equal(dm, expected)
def test_connectivity_matrix():
label_volume = np.array([[[3, 0, 0],
[0, 0, 0],
[0, 0, 4]]])
streamlines = [np.array([[0, 0, 0], [0, 0, 0], [0, 2, 2]], 'float'),
np.array([[0, 0, 0], [0, 1, 1], [0, 2, 2]], 'float'),
np.array([[0, 2, 2], [0, 1, 1], [0, 0, 0]], 'float')]
expected = np.zeros((5, 5), 'int')
expected[3, 4] = 2
expected[4, 3] = 1
# Check basic Case
matrix = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
symmetric=False)
assert_array_equal(matrix, expected)
# Test mapping
matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
symmetric=False, return_mapping=True)
assert_array_equal(matrix, expected)
assert_equal(mapping[3, 4], [0, 1])
assert_equal(mapping[4, 3], [2])
assert_equal(mapping.get((0, 0)), None)
# Test mapping and symmetric
matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
symmetric=True, return_mapping=True)
assert_equal(mapping[3, 4], [0, 1, 2])
# When symmetric only (3,4) is a key, not (4, 3)
assert_equal(mapping.get((4, 3)), None)
# expected output matrix is symmetric version of expected
expected = expected + expected.T
assert_array_equal(matrix, expected)
# Test mapping_as_streamlines, mapping dict has lists of streamlines
matrix, mapping = connectivity_matrix(streamlines, label_volume, (1, 1, 1),
symmetric=False,
return_mapping=True,
mapping_as_streamlines=True)
assert_true(mapping[3, 4][0] is streamlines[0])
assert_true(mapping[3, 4][1] is streamlines[1])
assert_true(mapping[4, 3][0] is streamlines[2])
# Test passing affine to connectivity_matrix
expected = matrix
affine = np.diag([-1, -1, -1, 1.])
streamlines = [-i for i in streamlines]
matrix = connectivity_matrix(streamlines, label_volume, affine=affine)
# In the symmetrical case, the matrix should be, well, symmetric:
assert_equal(matrix[3, 4], matrix[4, 3])
def test_ndbincount():
def check(expected):
assert_equal(bc[0, 0], expected[0])
assert_equal(bc[0, 1], expected[1])
assert_equal(bc[1, 0], expected[2])
assert_equal(bc[2, 2], expected[3])
x = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [1, 0], [2, 2]]).T
expected = [2, 2, 1, 1]
# count occurrences in x
bc = ndbincount(x)
assert_equal(bc.shape, (3, 3))
check(expected)
# pass in shape
bc = ndbincount(x, shape=(4, 5))
assert_equal(bc.shape, (4, 5))
check(expected)
# pass in weights
weights = np.arange(6.)
weights[-1] = 1.23
expected = [1., 5., 4., 1.23]
bc = ndbincount(x, weights=weights)
check(expected)
# raises an error if shape is too small
assert_raises(ValueError, ndbincount, x, None, (2, 2))
def test_reduce_labels():
shape = (4, 5, 6)
# labels from 100 to 220
labels = np.arange(100, np.prod(shape)+100).reshape(shape)
# new labels from 0 to 120, and lookup maps range(0, 120) to range(100, 220)
new_labels, lookup = reduce_labels(labels)
assert_array_equal(new_labels, labels-100)
assert_array_equal(lookup, labels.ravel())
def test_move_streamlines():
streamlines = make_streamlines()
affine = np.eye(4)
new_streamlines = move_streamlines(streamlines, affine)
for i, test_sl in enumerate(new_streamlines):
assert_array_equal(test_sl, streamlines[i])
affine[:3, 3] += (4, 5, 6)
new_streamlines = move_streamlines(streamlines, affine)
for i, test_sl in enumerate(new_streamlines):
assert_array_equal(test_sl, streamlines[i]+(4, 5, 6))
affine = np.eye(4)
affine = affine[[2, 1, 0, 3]]
new_streamlines = move_streamlines(streamlines, affine)
for i, test_sl in enumerate(new_streamlines):
assert_array_equal(test_sl, streamlines[i][:, [2, 1, 0]])
affine[:3, 3] += (4, 5, 6)
new_streamlines = move_streamlines(streamlines, affine)
undo_affine = move_streamlines(new_streamlines, np.eye(4),
input_space=affine)
for i, test_sl in enumerate(undo_affine):
assert_array_almost_equal(test_sl, streamlines[i])
# Test that changing the affine afterwards does not affect moved streamlines
affineA = affine.copy()
affineB = affine.copy()
streamlinesA = move_streamlines(streamlines, affineA)
streamlinesB = move_streamlines(streamlines, affineB)
affineB[:] = 0
for (a, b) in zip(streamlinesA, streamlinesB):
assert_array_equal(a, b)
def test_target():
streamlines = [np.array([[0., 0., 0.],
[1., 0., 0.],
[2., 0., 0.]]),
np.array([[0., 0., 0],
[0, 1., 1.],
[0, 2., 2.]])]
affine = np.eye(4)
mask = np.zeros((4, 4, 4), dtype=bool)
mask[0, 0, 0] = True
# Both pass though
new = list(target(streamlines, mask, affine=affine))
assert_equal(len(new), 2)
new = list(target(streamlines, mask, affine=affine, include=False))
assert_equal(len(new), 0)
# only first
mask[:] = False
mask[1, 0, 0] = True
new = list(target(streamlines, mask, affine=affine))
assert_equal(len(new), 1)
assert_true(new[0] is streamlines[0])
new = list(target(streamlines, mask, affine=affine, include=False))
assert_equal(len(new), 1)
assert_true(new[0] is streamlines[1])
# Test that bad points raise a value error
bad_sl = [np.array([[10., 10., 10.]])]
new = target(bad_sl, mask, affine=affine)
assert_raises(ValueError, list, new)
bad_sl = [-np.array([[10., 10., 10.]])]
new = target(bad_sl, mask, affine=affine)
assert_raises(ValueError, list, new)
# Test smaller voxels
affine = np.random.random((4, 4)) - .5
affine[3] = [0, 0, 0, 1]
streamlines = list(move_streamlines(streamlines, affine))
new = list(target(streamlines, mask, affine=affine))
assert_equal(len(new), 1)
assert_true(new[0] is streamlines[0])
new = list(target(streamlines, mask, affine=affine, include=False))
assert_equal(len(new), 1)
assert_true(new[0] is streamlines[1])
# Test that changing mask and affine do not break target
include = target(streamlines, mask, affine=affine)
exclude = target(streamlines, mask, affine=affine, include=False)
affine[:] = np.eye(4)
mask[:] = False
include = list(include)
exclude = list(exclude)
assert_equal(len(include), 1)
assert_true(include[0] is streamlines[0])
assert_equal(len(exclude), 1)
assert_true(exclude[0] is streamlines[1])
def test_near_roi():
streamlines = [np.array([[0., 0., 0.9],
[1.9, 0., 0.],
[3, 2., 2.]]),
np.array([[0.1, 0., 0],
[0, 1., 1.],
[0, 2., 2.]]),
np.array([[2, 2, 2],
[3, 3, 3]])]
affine = np.eye(4)
mask = np.zeros((4, 4, 4), dtype=bool)
mask[0, 0, 0] = True
mask[1, 0, 0] = True
assert_array_equal(near_roi(streamlines, mask, tol=1),
np.array([True, True, False]))
assert_array_equal(near_roi(streamlines, mask),
np.array([False, True, False]))
# If there is an affine, we need to use it:
affine[:, 3] = [-1, 100, -20, 1]
# Transform the streamlines:
x_streamlines = [sl + affine[:3, 3] for sl in streamlines]
assert_array_equal(near_roi(x_streamlines, mask, affine=affine, tol=1),
np.array([True, True, False]))
assert_array_equal(near_roi(x_streamlines, mask, affine=affine,
tol=None),
np.array([False, True, False]))
# Test for use of the 'all' mode:
assert_array_equal(near_roi(x_streamlines, mask, affine=affine, tol=None,
mode='all'), np.array([False, False, False]))
mask[0, 1, 1] = True
mask[0, 2, 2] = True
# Test for use of the 'all' mode, also testing that setting the tolerance
# to a very small number gets overridden:
assert_array_equal(near_roi(x_streamlines, mask, affine=affine, tol=0.1,
mode='all'), np.array([False, True, False]))
mask[2, 2, 2] = True
mask[3, 3, 3] = True
assert_array_equal(near_roi(x_streamlines, mask, affine=affine,
tol=None,
mode='all'),
np.array([False, True, True]))
# Test for use of endpoints as selection criteria:
mask = np.zeros((4, 4, 4), dtype=bool)
mask[0, 1, 1] = True
mask[3, 2, 2] = True
assert_array_equal(near_roi(streamlines, mask, tol=0.87,
mode="either_end"),
np.array([True, False, False]))
assert_array_equal(near_roi(streamlines, mask, tol=0.87,
mode="both_end"),
np.array([False, False, False]))
mask[0, 0, 0] = True
mask[0, 2, 2] = True
assert_array_equal(near_roi(streamlines, mask, mode="both_end"),
np.array([False, True, False]))
# Test with a generator input:
def generate_sl(streamlines):
for sl in streamlines:
yield sl
assert_array_equal(near_roi(generate_sl(streamlines),
mask, mode="both_end"),
np.array([False, True, False]))
def test_voxel_ornt():
sh = (40, 40, 40)
sz = (1, 2, 3)
I4 = np.eye(4)
ras = orientation_from_string('ras')
sra = orientation_from_string('sra')
lpi = orientation_from_string('lpi')
srp = orientation_from_string('srp')
affine = reorder_voxels_affine(ras, ras, sh, sz)
assert_array_equal(affine, I4)
affine = reorder_voxels_affine(sra, sra, sh, sz)
assert_array_equal(affine, I4)
affine = reorder_voxels_affine(lpi, lpi, sh, sz)
assert_array_equal(affine, I4)
affine = reorder_voxels_affine(srp, srp, sh, sz)
assert_array_equal(affine, I4)
streamlines = make_streamlines()
box = np.array(sh)*sz
sra_affine = reorder_voxels_affine(ras, sra, sh, sz)
toras_affine = reorder_voxels_affine(sra, ras, sh, sz)
assert_array_equal(np.dot(toras_affine, sra_affine), I4)
expected_sl = (sl[:, [2, 0, 1]] for sl in streamlines)
test_sl = move_streamlines(streamlines, sra_affine)
for ii in xrange(len(streamlines)):
assert_array_equal(next(test_sl), next(expected_sl))
lpi_affine = reorder_voxels_affine(ras, lpi, sh, sz)
toras_affine = reorder_voxels_affine(lpi, ras, sh, sz)
assert_array_equal(np.dot(toras_affine, lpi_affine), I4)
expected_sl = (box - sl for sl in streamlines)
test_sl = move_streamlines(streamlines, lpi_affine)
for ii in xrange(len(streamlines)):
assert_array_equal(next(test_sl), next(expected_sl))
srp_affine = reorder_voxels_affine(ras, srp, sh, sz)
toras_affine = reorder_voxels_affine(srp, ras, (40, 40, 40), (3, 1, 2))
assert_array_equal(np.dot(toras_affine, srp_affine), I4)
expected_sl = [sl.copy() for sl in streamlines]
for sl in expected_sl:
sl[:, 1] = box[1] - sl[:, 1]
expected_sl = (sl[:, [2, 0, 1]] for sl in expected_sl)
test_sl = move_streamlines(streamlines, srp_affine)
for ii in xrange(len(streamlines)):
assert_array_equal(next(test_sl), next(expected_sl))
def test_streamline_mapping():
streamlines = [np.array([[0, 0, 0], [0, 0, 0], [0, 2, 2]], 'float'),
np.array([[0, 0, 0], [0, 1, 1], [0, 2, 2]], 'float'),
np.array([[0, 2, 2], [0, 1, 1], [0, 0, 0]], 'float')]
mapping = streamline_mapping(streamlines, (1, 1, 1))
expected = {(0, 0, 0): [0, 1, 2], (0, 2, 2): [0, 1, 2],
(0, 1, 1): [1, 2]}
assert_equal(mapping, expected)
mapping = streamline_mapping(streamlines, (1, 1, 1),
mapping_as_streamlines=True)
expected = dict((k, [streamlines[i] for i in indices])
for k, indices in expected.items())
assert_equal(mapping, expected)
# Test passing affine
affine = np.eye(4)
affine[:3, 3] = .5
mapping = streamline_mapping(streamlines, affine=affine,
mapping_as_streamlines=True)
assert_equal(mapping, expected)
# Make the voxel size smaller
affine = np.diag([.5, .5, .5, 1.])
affine[:3, 3] = .25
expected = dict((tuple(i*2 for i in key), value)
for key, value in expected.items())
mapping = streamline_mapping(streamlines, affine=affine,
mapping_as_streamlines=True)
assert_equal(mapping, expected)
def test_rmi():
I1 = _rmi([3, 4], [10, 10])
assert_equal(I1, 34)
I1 = _rmi([0, 0], [10, 10])
assert_equal(I1, 0)
assert_raises(ValueError, _rmi, [10, 0], [10, 10])
try:
from numpy import ravel_multi_index
except ImportError:
raise nose.SkipTest()
# Dtype of random integers is system dependent
A, B, C, D = np.random.randint(0, 1000, size=[4, 100])
I1 = _rmi([A, B], dims=[1000, 1000])
I2 = ravel_multi_index([A, B], dims=[1000, 1000])
assert_array_equal(I1, I2)
I1 = _rmi([A, B, C, D], dims=[1000]*4)
I2 = ravel_multi_index([A, B, C, D], dims=[1000]*4)
assert_array_equal(I1, I2)
# Check for overflow with small int types
indices = np.random.randint(0, 255, size=(2, 100))
dims = (1000, 1000)
I1 = _rmi(indices, dims=dims)
I2 = ravel_multi_index(indices, dims=dims)
assert_array_equal(I1, I2)
def test_affine_for_trackvis():
voxel_size = np.array([1., 2, 3.])
affine = affine_for_trackvis(voxel_size)
origin = np.dot(affine, [0, 0, 0, 1])
assert_array_almost_equal(origin[:3], voxel_size / 2)
def test_length():
# Generate a simulated bundle of fibers:
n_streamlines = 50
n_pts = 100
t = np.linspace(-10, 10, n_pts)
bundle = []
for i in np.linspace(3, 5, n_streamlines):
pts = np.vstack((np.cos(2 * t/np.pi), np.zeros(t.shape) + i, t)).T
bundle.append(pts)
start = np.random.randint(10, 30, n_streamlines)
end = np.random.randint(60, 100, n_streamlines)
bundle = [10 * streamline[start[i]:end[i]] for (i, streamline) in
enumerate(bundle)]
bundle_lengths = length(bundle)
for idx, this_length in enumerate(bundle_lengths):
assert_equal(this_length, metrix.length(bundle[idx]))
def test_seeds_from_mask():
mask = np.random.random_integers(0, 1, size=(10, 10, 10))
seeds = seeds_from_mask(mask, density=1)
assert_equal(mask.sum(), len(seeds))
assert_array_equal(np.argwhere(mask), seeds)
mask[:] = False
mask[3, 3, 3] = True
seeds = seeds_from_mask(mask, density=[3, 4, 5])
assert_equal(len(seeds), 3 * 4 * 5)
assert_true(np.all((seeds > 2.5) & (seeds < 3.5)))
mask[4, 4, 4] = True
seeds = seeds_from_mask(mask, density=[3, 4, 5])
assert_equal(len(seeds), 2 * 3 * 4 * 5)
assert_true(np.all((seeds > 2.5) & (seeds < 4.5)))
in_333 = ((seeds > 2.5) & (seeds < 3.5)).all(1)
assert_equal(in_333.sum(), 3 * 4 * 5)
in_444 = ((seeds > 3.5) & (seeds < 4.5)).all(1)
assert_equal(in_444.sum(), 3 * 4 * 5)
def test_random_seeds_from_mask():
mask = np.random.random_integers(0, 1, size=(4, 6, 3))
seeds = random_seeds_from_mask(mask, seeds_per_voxel=24)
assert_equal(mask.sum() * 24, len(seeds))
mask[:] = False
mask[2, 2, 2] = True
seeds = random_seeds_from_mask(mask, seeds_per_voxel=8)
assert_equal(mask.sum() * 8, len(seeds))
assert_true(np.all((seeds > 1.5) & (seeds < 2.5)))
def test_connectivity_matrix_shape():
# Labels: z-planes have labels 0,1,2
labels = np.zeros((3, 3, 3), dtype=int)
labels[:, :, 1] = 1
labels[:, :, 2] = 2
# Streamline set, only moves between first two z-planes.
streamlines = [np.array([[0., 0., 0.],
[0., 0., 0.5],
[0., 0., 1.]]),
np.array([[0., 1., 1.],
[0., 1., 0.5],
[0., 1., 0.]])]
matrix = connectivity_matrix(streamlines, labels, affine=np.eye(4))
assert_equal(matrix.shape, (3, 3))
def test_unique_rows():
"""
Testing the function unique_coords
"""
arr = np.array([[1, 2, 3], [1, 2, 3], [2, 3, 4], [3, 4, 5]])
arr_w_unique = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
assert_array_equal(unique_rows(arr), arr_w_unique)
# Should preserve order:
arr = np.array([[2, 3, 4], [1, 2, 3], [1, 2, 3], [3, 4, 5]])
arr_w_unique = np.array([[2, 3, 4], [1, 2, 3], [3, 4, 5]])
assert_array_equal(unique_rows(arr), arr_w_unique)
# Should work even with longer arrays:
arr = np.array([[2, 3, 4], [1, 2, 3], [1, 2, 3], [3, 4, 5],
[6, 7, 8], [0, 1, 0], [1, 0, 1]])
arr_w_unique = np.array([[2, 3, 4], [1, 2, 3], [3, 4, 5],
[6, 7, 8], [0, 1, 0], [1, 0, 1]])
assert_array_equal(unique_rows(arr), arr_w_unique)
def test_reduce_rois():
roi1 = np.zeros((4, 4, 4), dtype=np.bool)
roi2 = np.zeros((4, 4, 4), dtype=np.bool)
roi1[1, 1, 1] = 1
roi2[2, 2, 2] = 1
include_roi, exclude_roi = reduce_rois([roi1, roi2], [True, True])
npt.assert_equal(include_roi, roi1 + roi2)
npt.assert_equal(exclude_roi, np.zeros((4, 4, 4)))
include_roi, exclude_roi = reduce_rois([roi1, roi2], [True, False])
npt.assert_equal(include_roi, roi1)
npt.assert_equal(exclude_roi, roi2)
# Array input:
include_roi, exclude_roi = reduce_rois(np.array([roi1, roi2]),
[True, True])
npt.assert_equal(include_roi, roi1 + roi2)
npt.assert_equal(exclude_roi, np.zeros((4, 4, 4)))
|
|
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import re
import os
import numpy
from cctbx import crystal
from cctbx import miller
from cctbx import uctbx
from cctbx.array_family import flex
from libtbx.utils import null_out
from yamtbx.dataproc.xds import re_xds_kwd
def is_xds_ascii(filein):
if not os.path.isfile(filein): return False
line = open(filein).readline()
return "FORMAT=XDS_ASCII" in line
# is_xds_ascii()
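# Usage sketch (hypothetical file name, for illustration only): the header can
# be parsed on its own by passing read_data=False, after which e.g. the frame
# range is cheap to query.
# xa = XDS_ASCII("XDS_ASCII.HKL", read_data=False)
# print xa.get_frame_range()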
class XDS_ASCII:
def __init__(self, filein, log_out=None, read_data=True, i_only=False):
self._log = null_out() if log_out is None else log_out
self._filein = filein
self.indices = flex.miller_index()
self.i_only = i_only
self.iobs, self.sigma_iobs, self.xd, self.yd, self.zd, self.rlp, self.peak, self.corr = [flex.double() for i in xrange(8)]
self.iframe = flex.int()
self.iset = flex.int() # only for XSCALE
self.input_files = {} # only for XSCALE {iset: [filename, wavelength, unit_cell], ...}
self.by_dials = False
self.read_header()
if read_data:
self.read_data()
def read_header(self):
re_item = re.compile("!ITEM_([^=]+)=([0-9]+)")
colindex = {} # {"H":1, "K":2, "L":3, ...}
nitemfound = 0
flag_data_start = False
num_hkl = 0
headers = []
for line in open(self._filein):
if flag_data_start:
if line.startswith("!END_OF_DATA"):
break
num_hkl += 1
continue
if line.startswith('!END_OF_HEADER'):
flag_data_start = True
continue
if line.startswith("!Generated by dials"):
self.by_dials = True
continue
if line.startswith("! ISET="):
pars = dict(re_xds_kwd.findall(line))
iset = int(pars["ISET"])
if iset not in self.input_files: self.input_files[iset] = [None, None, None]
if "INPUT_FILE" in pars:
self.input_files[iset][0] = pars["INPUT_FILE"]
elif "X-RAY_WAVELENGTH" in pars:
tmp = pars["X-RAY_WAVELENGTH"]
if " (" in tmp: tmp = tmp[:tmp.index(" (")]
self.input_files[iset][1] = tmp
elif "UNIT_CELL_CONSTANTS" in pars:
tmp = pars["UNIT_CELL_CONSTANTS"]
self.input_files[iset][2] = tmp
else:
headers.extend(re_xds_kwd.findall(line[line.index("!")+1:]))
self.nx, self.ny, self.anomalous, self.distance, self.wavelength, self.zmin, self.zmax = (None,)*7
for key, val in headers:
if key == "NUMBER_OF_ITEMS_IN_EACH_DATA_RECORD":
nitem = int(val.strip())
print >>self._log, 'number of items according to header is', nitem
elif key == "UNIT_CELL_CONSTANTS":
a, b, c, al, be, ga = map(lambda x:float(x), val.strip().split())
elif key == "UNIT_CELL_A-AXIS":
self.a_axis = tuple(map(float, val.split()))
elif key == "UNIT_CELL_B-AXIS":
self.b_axis = tuple(map(float, val.split()))
elif key == "UNIT_CELL_C-AXIS":
self.c_axis = tuple(map(float, val.split()))
elif key.startswith("ITEM_"):
item, ind = key[len("ITEM_"):], int(val)
colindex[item] = ind - 1
nitemfound += 1
elif key == "NX":
self.nx = int(val)
elif key == "NY":
self.ny = int(val)
elif key == "QX":
self.qx = float(val)
elif key == "QY":
self.qy = float(val)
elif key == "ORGX":
self.orgx = float(val)
elif key == "ORGY":
self.orgy = float(val)
elif key == "DATA_RANGE":
self.zmin, self.zmax = map(lambda x:int(x), val.strip().split())
elif key == "SPACE_GROUP_NUMBER":
ispgrp = int(val.strip())
elif key == "FRIEDEL'S_LAW":
assert val.strip() in ("TRUE", "FALSE")
self.anomalous = val.strip() == "FALSE"
elif key == "DETECTOR_DISTANCE":
self.distance = float(val)
elif key == "X-RAY_WAVELENGTH":
self.wavelength = float(val.split()[0])
elif key == "INCIDENT_BEAM_DIRECTION":
self.incident_axis = tuple(map(float, val.split()))
elif key == "ROTATION_AXIS":
self.rotation_axis = tuple(map(float, val.split()))
elif key == "OSCILLATION_RANGE":
self.osc_range = float(val.split()[0])
elif key == "VARIANCE_MODEL":
self.variance_model = tuple(map(float, val.split()))
assert nitem == len(colindex)
self._colindex = colindex
self._num_hkl = num_hkl
self.symm = crystal.symmetry(unit_cell=(a, b, c, al, be, ga),
space_group=ispgrp)
self.symm.show_summary(self._log)
print >>self._log, 'data_range=', self.zmin, self.zmax
# read_header()
def read_data(self):
colindex = self._colindex
is_xscale = "RLP" not in colindex
flag_data_start = False
col_H, col_K, col_L = colindex["H"], colindex["K"], colindex["L"]
col_i, col_sig, col_xd, col_yd, col_zd = colindex["IOBS"], colindex["SIGMA(IOBS)"], colindex["XD"], colindex["YD"], colindex["ZD"]
col_rlp, col_peak, col_corr, col_iset = colindex.get("RLP", None), colindex.get("PEAK", None), colindex.get("CORR", None), colindex.get("ISET", None)
self.indices = []
self.xd, self.yd, self.zd = [], [], []
self.iframe, self.rlp, self.peak, self.corr, self.iset = [], [], [], [], []
for line in open(self._filein):
if flag_data_start:
if line.startswith("!END_OF_DATA"):
break
sp = line.split()
h, k, l = int(sp[col_H]), int(sp[col_K]), int(sp[col_L])
self.indices.append([h,k,l])
self.iobs.append(float(sp[col_i]))
self.sigma_iobs.append(float(sp[col_sig]))
if not self.i_only:
self.xd.append(float(sp[col_xd]))
self.yd.append(float(sp[col_yd]))
self.zd.append(float(sp[col_zd]))
self.iframe.append(int(self.zd[-1])+1)
if not is_xscale:
self.rlp.append(float(sp[col_rlp]))
self.peak.append(float(sp[col_peak]))
self.corr.append(float(sp[col_corr]))
else:
self.iset.append(int(sp[col_iset]))
#res = symm.unit_cell().d((h,k,l))
if self.iframe[-1] < 0:
self.iframe[-1] = 0
print >>self._log, 'reflection with surprisingly low z-value:', self.zd[-1]
if line.startswith('!END_OF_HEADER'):
flag_data_start = True
self.indices = flex.miller_index(self.indices)
self.iobs, self.sigma_iobs, self.xd, self.yd, self.zd, self.rlp, self.peak, self.corr = [flex.double(x) for x in (self.iobs, self.sigma_iobs, self.xd, self.yd, self.zd, self.rlp, self.peak, self.corr)]
self.iframe = flex.int(self.iframe)
self.iset = flex.int(self.iset) # only for XSCALE
print >>self._log, "Reading data done.\n"
# read_data()
def get_frame_range(self):
"""quick function only to get frame number range"""
flag_data_start = False
col_zd = self._colindex["ZD"]
min_frame, max_frame = float("inf"), -float("inf")
for line in open(self._filein):
if flag_data_start:
if line.startswith("!END_OF_DATA"):
break
sp = line.split()
iframe = int(float(sp[col_zd]))+1
if iframe > 0 and iframe < min_frame: min_frame = iframe
if iframe > max_frame: max_frame = iframe
if line.startswith('!END_OF_HEADER'):
flag_data_start = True
return min_frame, max_frame
# get_frame_range()
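# Example: for a dataset whose ZD values run from 0.x to 359.x this returns
# (1, 360); frame numbers are derived as int(ZD) + 1, and non-positive frame
# numbers are ignored when determining the minimum.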
def as_miller_set(self, anomalous_flag=None):
if anomalous_flag is None:
anomalous_flag = self.anomalous
return miller.set(crystal_symmetry=self.symm,
indices=self.indices,
anomalous_flag=anomalous_flag)
# as_miller_set()
def i_obs(self, anomalous_flag=None):
array_info = miller.array_info(source_type="xds_ascii")#, wavelength=)
return miller.array(self.as_miller_set(anomalous_flag),
data=self.iobs, sigmas=self.sigma_iobs).set_info(array_info).set_observation_type_xray_intensity()
# i_obs()
def remove_selection(self, sel):
params = ("indices", "iobs", "sigma_iobs")
if not self.i_only:
params += ("xd", "yd", "zd", "rlp", "peak", "corr", "iframe", "iset")
for p in params:
if not getattr(self, p): continue
setattr(self, p, getattr(self, p).select(~sel))
# remove_selection()
def remove_rejected(self):
sel = self.sigma_iobs <= 0
self.remove_selection(sel)
# remove_rejected()
def write_selected(self, sel, hklout):
ofs = open(hklout, "w")
data_flag = False
count = 0
for line in open(self._filein):
if line.startswith('!END_OF_HEADER'):
ofs.write(line)
data_flag = True
elif line.startswith("!END_OF_DATA"):
ofs.write(line)
break
elif not data_flag:
ofs.write(line)
elif data_flag:
if sel[count]: ofs.write(line)
count += 1
# write_selected()
def write_reindexed(self, op, hklout, space_group=None):
"""
XXX Assuming hkl has 6*3 width!!
"""
ofs = open(hklout, "w")
col_H, col_K, col_L = map(lambda x:self._colindex[x], "HKL")
assert col_H==0 and col_K==1 and col_L==2
tr_mat = numpy.array(op.c_inv().r().as_double()).reshape(3,3).transpose()
transformed = numpy.dot(tr_mat, numpy.array([self.a_axis, self.b_axis, self.c_axis]))
data_flag = False
for line in open(self._filein):
if line.startswith('!UNIT_CELL_CONSTANTS='):
# XXX split by fixed columns
cell = uctbx.unit_cell(line[line.index("=")+1:].strip())
cell_tr = cell.change_basis(op)
if space_group is not None: cell_tr = space_group.average_unit_cell(cell_tr)
ofs.write("!UNIT_CELL_CONSTANTS=%10.3f%10.3f%10.3f%8.3f%8.3f%8.3f\n" % cell_tr.parameters())
elif line.startswith('!SPACE_GROUP_NUMBER=') and space_group is not None:
ofs.write("!SPACE_GROUP_NUMBER=%5d \n" % space_group.type().number())
elif line.startswith("!UNIT_CELL_A-AXIS="):
ofs.write("!UNIT_CELL_A-AXIS=%10.3f%10.3f%10.3f\n" % tuple(transformed[0,:]))
elif line.startswith("!UNIT_CELL_B-AXIS="):
ofs.write("!UNIT_CELL_B-AXIS=%10.3f%10.3f%10.3f\n" % tuple(transformed[1,:]))
elif line.startswith("!UNIT_CELL_C-AXIS="):
ofs.write("!UNIT_CELL_C-AXIS=%10.3f%10.3f%10.3f\n" % tuple(transformed[2,:]))
elif line.startswith('!END_OF_HEADER'):
ofs.write(line)
data_flag = True
elif line.startswith("!END_OF_DATA"):
ofs.write(line)
break
elif not data_flag:
ofs.write(line)
elif data_flag:
if not self.by_dials:
hkl = tuple(map(int, line[:18].split()))
hkl = op.apply(hkl)
ofs.write("%6d%6d%6d"%hkl)
ofs.write(line[18:])
else:
sp = line.split()
hkl = op.apply(tuple(map(int, sp[:3])))
ofs.write(" ".join(map(str, hkl)))
ofs.write(" ")
ofs.write(" ".join(sp[3:]))
ofs.write("\n")
return cell_tr
# write_reindexed()
#class XDS_ASCII
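# Usage sketch (not part of the original module). The constructor and
# read_header()/read_data() are defined earlier in this file, so the exact
# call below is an assumption for illustration only:
#
#   xac = XDS_ASCII("XDS_ASCII.HKL", sys.stdout)  # hypothetical arguments
#   xac.remove_rejected()                         # drop reflections with sigma <= 0
#   i_obs = xac.i_obs(anomalous_flag=None)        # cctbx intensity array
#   print >>sys.stdout, i_obs.size(), xac.get_frame_range()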
|
|
#-*- coding: utf-8 -*-
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task manager for Turbinia."""
from __future__ import unicode_literals, absolute_import
import logging
from datetime import datetime
import time
import os
import filelock
from prometheus_client import Gauge
import turbinia
from turbinia import workers
from turbinia import evidence
from turbinia import config
from turbinia import job_utils
from turbinia import state_manager
from turbinia import task_utils
from turbinia import TurbiniaException
from turbinia.jobs import manager as jobs_manager
from turbinia.lib import recipe_helpers
from turbinia.workers.abort import AbortTask
config.LoadConfig()
if config.TASK_MANAGER.lower() == 'psq':
import psq
from google.cloud import exceptions
from google.cloud import datastore
from google.cloud import pubsub
from turbinia import pubsub as turbinia_pubsub
elif config.TASK_MANAGER.lower() == 'celery':
from celery import states as celery_states
from turbinia import tcelery as turbinia_celery
log = logging.getLogger('turbinia')
PSQ_TASK_TIMEOUT_SECONDS = 604800
PSQ_QUEUE_WAIT_SECONDS = 2
# The amount of time in seconds that the Server will wait in addition to the
# Job/Task timeout value before it times out a given Task. This is to make sure
# that the Server doesn't time out the Task before the Worker has a chance to
# and should account for the Task scheduling and setup time that happens before
# the Task starts.
SERVER_TASK_TIMEOUT_BUFFER = 300
# Define metrics
turbinia_server_tasks_total = Gauge(
'turbinia_server_tasks_total', 'Turbinia Server Total Tasks')
turbinia_server_tasks_completed_total = Gauge(
'turbinia_server_tasks_completed_total',
'Total number of completed server tasks')
turbinia_jobs_total = Gauge('turbinia_jobs_total', 'Total number of jobs created')
turbinia_jobs_completed_total = Gauge(
'turbinia_jobs_completed_total', 'Total number of jobs resolved')
turbinia_server_request_total = Gauge(
'turbinia_server_request_total', 'Total number of requests received.')
turbinia_server_task_timeout_total = Gauge(
'turbinia_server_task_timeout_total',
'Total number of Tasks that have timed out on the Server.')
turbinia_result_success_invalid = Gauge(
'turbinia_result_success_invalid',
'The result returned from the Task had an invalid success status of None')
def get_task_manager():
"""Return task manager object based on config.
Returns:
Initialized TaskManager object.
Raises:
TurbiniaException: When an unknown task manager type is specified
"""
config.LoadConfig()
# pylint: disable=no-else-return
if config.TASK_MANAGER.lower() == 'psq':
return PSQTaskManager()
elif config.TASK_MANAGER.lower() == 'celery':
return CeleryTaskManager()
else:
msg = 'Task Manager type "{0:s}" not implemented'.format(
config.TASK_MANAGER)
raise turbinia.TurbiniaException(msg)
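# Usage sketch (illustrative, not part of the original module): a server
# entry point is expected to drive the manager roughly like this, with
# config.TASK_MANAGER selecting between the implementations defined below:
#
#   task_manager = get_task_manager()
#   task_manager.setup(jobs_denylist=None, jobs_allowlist=None)
#   task_manager.run()  # blocks in the main server loop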
class BaseTaskManager:
"""Class to manage Turbinia Tasks.
Handles incoming new Evidence messages, adds new Tasks to the queue and
processes results from Tasks that have run.
Attributes:
jobs (list[TurbiniaJob]): Uninstantiated job classes.
running_jobs (list[TurbiniaJob]): A list of jobs that are
currently running.
evidence (list): A list of evidence objects to process.
state_manager (DatastoreStateManager|RedisStateManager): State manager
object to handle syncing with storage.
tasks (list[TurbiniaTask]): Running tasks.
"""
def __init__(self):
self.jobs = []
self.running_jobs = []
self.state_manager = state_manager.get_state_manager()
@property
def tasks(self):
"""A property that returns all outstanding Tasks.
Returns:
list[TurbiniaTask]: All outstanding Tasks.
"""
return [task for job in self.running_jobs for task in job.tasks]
def _backend_setup(self, *args, **kwargs):
"""Sets up backend dependencies.
Raises:
TurbiniaException: When encountering fatal errors setting up dependencies.
"""
raise NotImplementedError
def setup(self, jobs_denylist=None, jobs_allowlist=None, *args, **kwargs):
"""Does setup of Task manager and its dependencies.
Args:
jobs_denylist (list): Jobs that will be excluded from running
jobs_allowlist (list): The only Jobs that will be allowed to run
"""
self._backend_setup(*args, **kwargs)
job_names = jobs_manager.JobsManager.GetJobNames()
if jobs_denylist or jobs_allowlist:
selected_jobs = jobs_denylist or jobs_allowlist
for job in selected_jobs:
if job.lower() not in job_names:
msg = (
'Error creating server. Job {0!s} is not found in registered '
'jobs {1!s}.'.format(job, job_names))
log.error(msg)
raise TurbiniaException(msg)
log.info(
'Filtering Jobs with allowlist {0!s} and denylist {1!s}'.format(
jobs_allowlist, jobs_denylist))
job_names = jobs_manager.JobsManager.FilterJobNames(
job_names, jobs_denylist, jobs_allowlist)
# Disable any jobs from the config that were not previously allowlisted.
disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []
disabled_jobs = [j.lower() for j in disabled_jobs]
if jobs_allowlist:
disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))
if disabled_jobs:
log.info(
'Disabling non-allowlisted jobs configured to be disabled in the '
'config file: {0:s}'.format(', '.join(disabled_jobs)))
job_names = jobs_manager.JobsManager.FilterJobNames(
job_names, disabled_jobs, [])
self.jobs = [job for _, job in jobs_manager.JobsManager.GetJobs(job_names)]
dependencies = config.ParseDependencies()
job_utils.register_job_timeouts(dependencies)
log.debug('Registered job list: {0:s}'.format(str(job_names)))
def abort_request(self, request_id, requester, evidence_name, message):
"""Abort the request by creating an AbortTask.
When there is a fatal error processing the request such that we can't
continue, an AbortTask will be created with the error message and is written
directly to the state database. This way the client will get a reasonable
error in response to the failure.
Args:
request_id(str): The request ID.
requester(str): The username of the requester.
evidence_name(str): Name of the Evidence requested to be processed.
message(str): The error message to abort the request with.
"""
abort_task = AbortTask(request_id=request_id, requester=requester)
result = workers.TurbiniaTaskResult(
request_id=request_id, no_output_manager=True)
result.status = 'Processing request for {0:s} aborted: {1:s}'.format(
evidence_name, message)
result.successful = False
abort_task.result = result
self.state_manager.update_task(abort_task)
def add_evidence(self, evidence_):
"""Adds new evidence and creates tasks to process it.
This creates all tasks configured to process the given type of evidence.
Args:
evidence_: evidence object to add.
Raises:
TurbiniaException: When no Jobs are found.
"""
if not self.jobs:
raise turbinia.TurbiniaException(
'Jobs must be registered before evidence can be added')
log.info('Adding new evidence: {0:s}'.format(str(evidence_)))
job_count = 0
jobs_list = []
jobs_allowlist = evidence_.config['globals'].get('jobs_allowlist', [])
jobs_denylist = evidence_.config['globals'].get('jobs_denylist', [])
if jobs_denylist or jobs_allowlist:
log.info(
'Filtering Jobs with allowlist {0!s} and denylist {1!s}'.format(
jobs_allowlist, jobs_denylist))
jobs_list = jobs_manager.JobsManager.FilterJobObjects(
self.jobs, jobs_denylist, jobs_allowlist)
else:
jobs_list = self.jobs
# TODO(aarontp): Add some kind of loop detection in here so that jobs can
# register for Evidence(), or other evidence types that may be a super
# class of the output of the job itself. Short term we could potentially
# have a run time check for this upon Job instantiation to prevent it.
for job in jobs_list:
# Doing a strict type check here for now until we can get the above
# comment figured out.
# pylint: disable=unidiomatic-typecheck
job_applicable = [
True for t in job.evidence_input if type(evidence_) == t
]
if job_applicable:
job_instance = job(
request_id=evidence_.request_id, evidence_config=evidence_.config)
for task in job_instance.create_tasks([evidence_]):
self.add_task(task, job_instance, evidence_)
self.running_jobs.append(job_instance)
log.info(
'Adding {0:s} job to process {1:s}'.format(
job_instance.name, evidence_.name))
job_count += 1
turbinia_jobs_total.inc()
if not job_count:
log.warning(
'No Jobs/Tasks were created for Evidence [{0:s}]. '
'Request or recipe parsing may have failed, or Jobs may need to be '
'configured to allow this type of Evidence as input'.format(
str(evidence_)))
def check_done(self):
"""Checks if we have any outstanding tasks.
Returns:
bool: Indicating whether we are done.
"""
return not bool(len(self.tasks))
def check_request_done(self, request_id):
"""Checks if we have any outstanding tasks for the request ID.
Args:
request_id (str): The request ID to check for completion
Returns:
bool: Indicating whether all Jobs are done.
"""
job_completion = []
for job in self.running_jobs:
if request_id == job.request_id:
job_completion.append(job.check_done())
return min(job_completion)
def check_request_finalized(self, request_id):
"""Checks if the the request is done and finalized.
A request can be done but not finalized if all of the Tasks created by the
original Jobs have completed, but the "finalize" Job/Tasks have not been
run. These finalize Job/Tasks are created after all of the original
Jobs/Tasks have completed. Only one Job needs to be marked as finalized for
the entire request to be considered finalized.
Args:
request_id (str): The request ID to check for finalization.
Returns:
bool: Indicating whether the request is both done and finalized.
"""
request_finalized = False
for job in self.running_jobs:
if request_id == job.request_id and job.is_finalized:
request_finalized = True
break
return request_finalized and self.check_request_done(request_id)
def check_task_timeout(self, task):
"""Checks whether a Task has timed out.
Tasks should normally be timed out by the Worker, but if there was some
kind of fatal error on the Worker or other problem in the Task that
prevented the results from returning then we will time out on the Server
side as well and abandon the Task.
Args:
task(TurbiniaTask): The Task to check for the timeout.
Returns:
int: If the Task has timed out, this is the time in seconds, otherwise if
the Task hasn't timed out it will return 0.
"""
job = self.get_job(task.job_id)
timeout_target = jobs_manager.JobsManager.GetTimeoutValue(job.name)
task_runtime = datetime.now() - task.start_time
task_runtime = int(task_runtime.total_seconds())
if task_runtime > timeout_target + SERVER_TASK_TIMEOUT_BUFFER:
timeout = task_runtime
else:
timeout = 0
return timeout
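# Worked example: with a Job timeout of 3600 seconds and
# SERVER_TASK_TIMEOUT_BUFFER of 300, a Task runtime of 4000 seconds exceeds
# 3900 and 4000 is returned; a runtime of 3800 seconds returns 0.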
def get_evidence(self):
"""Checks for new evidence to process.
Returns:
list[evidence.Evidence]: The evidence to process.
"""
raise NotImplementedError
def get_job(self, job_id):
"""Gets the running Job instance from the given Job ID
Args:
job_id (str): The Job id to get the job for.
Returns:
TurbiniaJob|None: Job instance if found, else None
"""
job = None
for job_instance in self.running_jobs:
if job_id == job_instance.id:
job = job_instance
break
return job
def generate_request_finalize_tasks(self, job):
"""Generates the Tasks to finalize the given request ID.
Args:
job (TurbiniaJob): The last Job that was run for this request.
"""
request_id = job.request_id
final_job = jobs_manager.JobsManager.GetJobInstance('FinalizeRequestJob')
final_job.request_id = request_id
final_job.evidence.config = job.evidence.config
log.debug(
'Request {0:s} done, but not finalized, creating FinalizeRequestJob '
'{1:s}'.format(request_id, final_job.id))
# Finalize tasks use EvidenceCollection with all evidence created by the
# request or job.
final_evidence = evidence.EvidenceCollection()
final_evidence.request_id = request_id
self.running_jobs.append(final_job)
turbinia_jobs_total.inc()
# Gather evidence created by every Job in the request.
for running_job in self.running_jobs:
if running_job.request_id == request_id:
final_evidence.collection.extend(running_job.evidence.collection)
for finalize_task in final_job.create_tasks([final_evidence]):
self.add_task(finalize_task, final_job, final_evidence)
def add_task(self, task, job, evidence_):
"""Adds a task and evidence to process to the task manager.
Args:
task: An instantiated Turbinia Task
job: The TurbiniaJob that the Task belongs to.
evidence_: An Evidence object to be processed.
"""
if evidence_.request_id:
task.request_id = evidence_.request_id
elif job and job.request_id:
task.request_id = job.request_id
else:
log.error(
'Request ID not found in Evidence {0!s} or Task {1!s}. Not adding '
'new Task because of undefined state'.format(evidence_, task))
return
evidence_.config = job.evidence.config
task.base_output_dir = config.OUTPUT_DIR
task.requester = evidence_.config.get('globals', {}).get('requester')
task.group_id = evidence_.config.get('globals', {}).get('group_id')
if job:
task.job_id = job.id
task.job_name = job.name
job.tasks.append(task)
self.state_manager.write_new_task(task)
self.enqueue_task(task, evidence_)
turbinia_server_tasks_total.inc()
def remove_jobs(self, request_id):
"""Removes the all Jobs for the given request ID.
Args:
request_id (str): The ID of the request we want to remove jobs for.
"""
remove_jobs = [j for j in self.running_jobs if j.request_id == request_id]
log.debug(
'Removing {0:d} completed Job(s) for request ID {1:s}.'.format(
len(remove_jobs), request_id))
# pylint: disable=expression-not-assigned
[self.remove_job(j.id) for j in remove_jobs]
def remove_job(self, job_id):
"""Removes a Job from the running jobs list.
Args:
job_id (str): The ID of the job to remove.
Returns:
bool: True if Job removed, else False.
"""
remove_job = None
for job in self.running_jobs:
if job_id == job.id:
remove_job = job
break
if remove_job:
self.running_jobs.remove(remove_job)
turbinia_jobs_completed_total.inc()
return bool(remove_job)
def enqueue_task(self, task, evidence_):
"""Enqueues a task and evidence in the implementation specific task queue.
Args:
task: An instantiated Turbinia Task
evidence_: An Evidence object to be processed.
"""
raise NotImplementedError
def process_result(self, task_result):
"""Runs final task results recording.
self.process_tasks handles things that have failed at the task queue layer
(i.e. PSQ), and this method handles tasks that have potentially failed
below that layer (i.e. somewhere in our Task code).
This also adds the Evidence to the running jobs and running requests so we
can process those later in 'finalize' Tasks.
Args:
task_result: The TurbiniaTaskResult object
Returns:
TurbiniaJob|None: The Job for the processed task, else None
"""
if task_result.successful is None:
log.error(
'Task {0:s} from {1:s} returned invalid success status "None". '
'Setting this to False so the client knows the Task is complete. '
'Usually this means that the Task returning the TurbiniaTaskResult '
'did not call the close() method on it.'.format(
task_result.task_name, task_result.worker_name))
turbinia_result_success_invalid.inc()
task_result.successful = False
if task_result.status:
task_result.status = (
task_result.status + ' (Success status forcefully set to False)')
if not task_result.successful:
log.error(
'Task {0:s} from {1:s} was not successful'.format(
task_result.task_name, task_result.worker_name))
else:
log.info(
'Task {0:s} from {1:s} executed with status [{2:s}]'.format(
task_result.task_name, task_result.worker_name,
task_result.status))
if not isinstance(task_result.evidence, list):
log.warning(
'Task {0:s} from {1:s} did not return evidence list'.format(
task_result.task_name, task_result.worker_name))
task_result.evidence = []
job = self.get_job(task_result.job_id)
if not job:
log.warning(
'Received task results for unknown Job from Task ID {0:s}'.format(
task_result.task_id))
# Reprocess new evidence and save instance for later consumption by finalize
# tasks.
for evidence_ in task_result.evidence:
if isinstance(evidence_, evidence.Evidence):
log.info(
'Task {0:s} from {1:s} returned Evidence {2:s}'.format(
task_result.task_name, task_result.worker_name, evidence_.name))
self.add_evidence(evidence_)
if job:
job.evidence.add_evidence(evidence_)
else:
log.error(
'Task {0:s} from {1:s} returned non-Evidence output type '
'{2:s}'.format(
task_result.task_name, task_result.worker_name,
type(task_result.evidence)))
return job
def process_job(self, job, task):
"""Processes the Job after Task completes.
This removes the Task from the running Job and generates the "finalize"
Tasks after all the Tasks for the Job and Request have completed. It also
removes all Jobs from the running Job list once everything is complete.
Args:
job (TurbiniaJob): The Job to process
task (TurbiniaTask): The Task that just completed.
"""
log.debug(
'Processing Job {0:s} for completed Task {1:s}'.format(
job.name, task.id))
self.state_manager.update_task(task)
job.remove_task(task.id)
turbinia_server_tasks_completed_total.inc()
if job.check_done() and not (job.is_finalize_job or task.is_finalize_task):
log.debug(
'Job {0:s} completed, creating Job finalize tasks'.format(job.name))
final_task = job.create_final_task()
if final_task:
final_task.is_finalize_task = True
self.add_task(final_task, job, job.evidence)
turbinia_server_tasks_total.inc()
elif job.check_done() and job.is_finalize_job:
job.is_finalized = True
request_id = job.request_id
request_done = self.check_request_done(request_id)
request_finalized = self.check_request_finalized(request_id)
# If the request is done but not finalized, we generate the finalize tasks.
if request_done and not request_finalized:
self.generate_request_finalize_tasks(job)
# If the Job has been finalized then we can remove all the Jobs for this
# request since everything is complete.
elif request_done and request_finalized:
self.remove_jobs(request_id)
def process_tasks(self):
"""Process any tasks that need to be processed.
Returns:
list[TurbiniaTask]: Tasks to process that have completed.
"""
raise NotImplementedError
def run(self, under_test=False):
"""Main run loop for TaskManager."""
log.info('Starting Task Manager run loop')
while True:
# pylint: disable=expression-not-assigned
[self.add_evidence(x) for x in self.get_evidence()]
for task in self.process_tasks():
if task.result:
job = self.process_result(task.result)
if job:
self.process_job(job, task)
self.state_manager.update_task(task)
if config.SINGLE_RUN and self.check_done():
log.info('No more tasks to process. Exiting now.')
return
if under_test:
break
time.sleep(config.SLEEP_TIME)
def timeout_task(self, task, timeout):
"""Sets status and result data for timed out Task.
Args:
task(TurbiniaTask): The Task that will be timed out.
timeout(int): The timeout value that has been reached.
Returns:
TurbiniaTask: The updated Task.
"""
result = workers.TurbiniaTaskResult(
request_id=task.request_id, no_output_manager=True,
no_state_manager=True)
result.setup(task)
result.status = (
'Task {0:s} timed out on the Server and was auto-closed after '
'{1:d} seconds'.format(task.name, timeout))
result.successful = False
result.closed = True
task.result = result
turbinia_server_task_timeout_total.inc()
return task
class CeleryTaskManager(BaseTaskManager):
"""Celery implementation of BaseTaskManager.
Attributes:
celery (TurbiniaCelery): Celery task queue, handles worker tasks.
kombu (TurbiniaKombu): Kombu queue, handles receiving evidence.
celery_runner: task_runner method, but wrapped for Celery usage.
"""
def __init__(self):
self.celery = None
self.kombu = None
self.celery_runner = None
config.LoadConfig()
super(CeleryTaskManager, self).__init__()
def _backend_setup(self, *args, **kwargs):
self.celery = turbinia_celery.TurbiniaCelery()
self.celery.setup()
self.kombu = turbinia_celery.TurbiniaKombu(config.KOMBU_CHANNEL)
self.kombu.setup()
self.celery_runner = self.celery.app.task(
task_utils.task_runner, name="task_runner")
def process_tasks(self):
"""Determine the current state of our tasks.
Returns:
list[TurbiniaTask]: all completed tasks
"""
completed_tasks = []
for task in self.tasks:
check_timeout = False
celery_task = task.stub
if not celery_task:
log.debug('Task {0:s} not yet created'.format(task.id))
check_timeout = True
elif celery_task.status == celery_states.STARTED:
log.debug('Task {0:s} not finished'.format(celery_task.id))
check_timeout = True
elif celery_task.status == celery_states.FAILURE:
log.warning('Task {0:s} failed.'.format(celery_task.id))
completed_tasks.append(task)
elif celery_task.status == celery_states.SUCCESS:
task.result = workers.TurbiniaTaskResult.deserialize(celery_task.result)
completed_tasks.append(task)
else:
check_timeout = True
log.debug('Task {0:s} status unknown'.format(celery_task.id))
# For certain Task states we want to check whether the Task has timed out
# or not.
if check_timeout:
timeout = self.check_task_timeout(task)
if timeout:
log.warning(
'Task {0:s} timed out on the server after {1:d} seconds. '
'Auto-closing Task.'.format(task.id, timeout))
task = self.timeout_task(task, timeout)
completed_tasks.append(task)
outstanding_task_count = len(self.tasks) - len(completed_tasks)
if outstanding_task_count > 0:
log.info('{0:d} Tasks still outstanding.'.format(outstanding_task_count))
return completed_tasks
def get_evidence(self):
"""Receives new evidence.
Returns:
list[Evidence]: evidence to process.
"""
requests = self.kombu.check_messages()
evidence_list = []
for request in requests:
for evidence_ in request.evidence:
if not evidence_.request_id:
evidence_.request_id = request.request_id
log.info(
'Received evidence [{0:s}] from Kombu message.'.format(
str(evidence_)))
success, message = recipe_helpers.validate_recipe(request.recipe)
if not success:
self.abort_request(
evidence_.request_id, request.requester, evidence_.name, message)
else:
evidence_.config = request.recipe
evidence_.config['globals']['requester'] = request.requester
evidence_.config['globals']['group_id'] = request.recipe['globals'][
'group_id']
evidence_list.append(evidence_)
turbinia_server_request_total.inc()
return evidence_list
def enqueue_task(self, task, evidence_):
log.info(
'Adding Celery task {0:s} with evidence {1:s} to queue'.format(
task.name, evidence_.name))
task.stub = self.celery_runner.delay(
task.serialize(), evidence_.serialize())
class PSQTaskManager(BaseTaskManager):
"""PSQ implementation of BaseTaskManager.
Attributes:
psq: PSQ Queue object.
server_pubsub: A PubSubClient object for receiving new evidence messages.
"""
def __init__(self):
self.psq = None
self.server_pubsub = None
config.LoadConfig()
super(PSQTaskManager, self).__init__()
# pylint: disable=keyword-arg-before-vararg
def _backend_setup(self, server=True, *args, **kwargs):
"""
Args:
server (bool): Whether this is the client or a server
Raises:
TurbiniaException: When there are errors creating PSQ Queue
"""
log.debug(
'Setting up PSQ Task Manager requirements on project {0:s}'.format(
config.TURBINIA_PROJECT))
self.server_pubsub = turbinia_pubsub.TurbiniaPubSub(config.PUBSUB_TOPIC)
if server:
self.server_pubsub.setup_subscriber()
else:
self.server_pubsub.setup_publisher()
psq_publisher = pubsub.PublisherClient()
psq_subscriber = pubsub.SubscriberClient()
datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)
try:
self.psq = psq.Queue(
psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,
name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))
except exceptions.GoogleCloudError as e:
msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))
log.error(msg)
raise turbinia.TurbiniaException(msg)
def process_tasks(self):
completed_tasks = []
for task in self.tasks:
check_timeout = False
psq_task = task.stub.get_task()
# This handles tasks that have failed at the PSQ layer.
if not psq_task:
check_timeout = True
log.debug('Task {0:s} not yet created'.format(task.stub.task_id))
elif psq_task.status not in (psq.task.FINISHED, psq.task.FAILED):
check_timeout = True
log.debug('Task {0:s} not finished'.format(psq_task.id))
elif psq_task.status == psq.task.FAILED:
log.warning('Task {0:s} failed.'.format(psq_task.id))
completed_tasks.append(task)
else:
task.result = workers.TurbiniaTaskResult.deserialize(
task.stub.result(timeout=PSQ_TASK_TIMEOUT_SECONDS))
completed_tasks.append(task)
# For certain Task states we want to check whether the Task has timed out
# or not.
if check_timeout:
timeout = self.check_task_timeout(task)
if timeout:
log.warning(
'Task {0:s} timed out on the server after {1:d} seconds. '
'Auto-closing Task.'.format(task.id, timeout))
task = self.timeout_task(task, timeout)
completed_tasks.append(task)
outstanding_task_count = len(self.tasks) - len(completed_tasks)
if outstanding_task_count > 0:
log.info('{0:d} Tasks still outstanding.'.format(outstanding_task_count))
return completed_tasks
def get_evidence(self):
requests = self.server_pubsub.check_messages()
evidence_list = []
for request in requests:
for evidence_ in request.evidence:
if not evidence_.request_id:
evidence_.request_id = request.request_id
log.info(
'Received evidence [{0:s}] from PubSub message.'.format(
str(evidence_)))
success, message = recipe_helpers.validate_recipe(request.recipe)
if not success:
self.abort_request(
evidence_.request_id, request.requester, evidence_.name, message)
else:
evidence_.config = request.recipe
evidence_.config['globals']['requester'] = request.requester
evidence_list.append(evidence_)
turbinia_server_request_total.inc()
return evidence_list
def enqueue_task(self, task, evidence_):
log.info(
'Adding PSQ task {0:s} with evidence {1:s} to queue'.format(
task.name, evidence_.name))
task.stub = self.psq.enqueue(
task_utils.task_runner, task.serialize(), evidence_.serialize())
time.sleep(PSQ_QUEUE_WAIT_SECONDS)
|
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Brian Quinlan ([email protected])'
import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing
from multiprocessing import SimpleQueue
from multiprocessing.connection import wait
import threading
import weakref
from functools import partial
import itertools
import traceback
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
# Hack to embed stringification of remote traceback in local traceback
class _RemoteTraceback(Exception):
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
class _ExceptionWithTraceback:
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return _rebuild_exc, (self.exc, self.tb)
def _rebuild_exc(exc, tb):
exc.__cause__ = _RemoteTraceback(tb)
return exc
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _get_chunks(*iterables, chunksize):
""" Iterates over zip()ed iterables in chunks. """
it = zip(*iterables)
while True:
chunk = tuple(itertools.islice(it, chunksize))
if not chunk:
return
yield chunk
def _process_chunk(fn, chunk):
""" Processes a chunk of an iterable passed to map.
Runs the function passed to map() on a chunk of the
iterable passed to map.
This function is run in a separate process.
"""
return [fn(*args) for args in chunk]
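# Example: _get_chunks([1, 2, 3, 4, 5], chunksize=2) yields ((1,), (2,)),
# ((3,), (4,)) and ((5,),), and _process_chunk(str, ((1,), (2,))) returns
# ['1', '2'].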
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A multiprocessing.Queue of _ResultItems that will be written
to by the worker. The worker exits once it receives a None sentinel from
call_queue.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(os.getpid())
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException as e:
exc = _ExceptionWithTraceback(e, e.__traceback__)
result_queue.put(_ResultItem(call_item.work_id, exception=exc))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
processes: A dict mapping process ids to the multiprocessing.Process
instances used as workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
executor = None
def shutting_down():
return _shutdown or executor is None or executor._shutdown_thread
def shutdown_worker():
# This is an upper bound
nb_children_alive = sum(p.is_alive() for p in processes.values())
for i in range(0, nb_children_alive):
call_queue.put_nowait(None)
# Release the queue's resources as soon as possible.
call_queue.close()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS X.
for p in processes.values():
p.join()
reader = result_queue._reader
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
sentinels = [p.sentinel for p in processes.values()]
assert sentinels
ready = wait([reader] + sentinels)
if reader in ready:
result_item = reader.recv()
else:
# Mark the process pool broken so that submits fail right now.
executor = executor_reference()
if executor is not None:
executor._broken = True
executor._shutdown_thread = True
executor = None
# All futures in flight must be marked failed
for work_id, work_item in pending_work_items.items():
work_item.future.set_exception(
BrokenProcessPool(
"A process in the process pool was "
"terminated abruptly while the future was "
"running or pending."
))
# Delete references to object. See issue16284
del work_item
pending_work_items.clear()
# Terminate remaining workers forcibly: the queues or their
# locks may be in a dirty state and block forever.
for p in processes.values():
p.terminate()
shutdown_worker()
return
if isinstance(result_item, int):
# Clean shutdown of a worker using its PID
# (avoids marking the executor broken)
assert shutting_down()
p = processes.pop(result_item)
p.join()
if not processes:
shutdown_worker()
return
elif result_item is not None:
work_item = pending_work_items.pop(result_item.work_id, None)
# work_item can be None if another process terminated (see above)
if work_item is not None:
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Delete references to object. See issue16284
del work_item
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if shutting_down():
try:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
shutdown_worker()
return
except Full:
# This is not a problem: we will eventually be woken up (in
# result_queue.get()) and be able to send a sentinel again.
pass
executor = None
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# indeterminate limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class BrokenProcessPool(RuntimeError):
"""
Raised when a process in a ProcessPoolExecutor terminated abruptly
while a future was in the running state.
"""
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = os.cpu_count() or 1
else:
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
# Killed worker processes can produce spurious "broken pipe"
# tracebacks in the queue's own worker thread. But we detect killed
# processes anyway, so silence the tracebacks.
self._call_queue._ignore_epipe = True
self._result_queue = SimpleQueue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
# Map of pids to processes
self._processes = {}
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._broken = False
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
# Start the processes so that their sentinels are known.
self._adjust_process_count()
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes[p.pid] = p
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
raise BrokenProcessPool('A child process terminated '
'abruptly, the process pool is not usable anymore')
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def map(self, fn, *iterables, timeout=None, chunksize=1):
"""Returns an iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
chunksize: If greater than one, the iterables will be chopped into
chunks of size chunksize and submitted to the process pool.
If set to one, the items in the list will be sent one at a time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
if chunksize < 1:
raise ValueError("chunksize must be >= 1.")
results = super().map(partial(_process_chunk, fn),
_get_chunks(*iterables, chunksize=chunksize),
timeout=timeout)
return itertools.chain.from_iterable(results)
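# Example: with chunksize=3, map(f, range(10)) sends the work items to the
# workers in chunks of three (0, 1, 2), (3, 4, 5), ... and the per-chunk
# result lists are flattened back into a single iterator of ten results.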
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
|
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from cinder.openstack.common import jsonutils
from cinder import rpc
from cinder.volume import utils
CONF = cfg.CONF
class VolumeAPI(object):
'''Client side of the volume rpc API.
API version history:
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
1.5 - Add accept_transfer.
1.6 - Add extend_volume.
1.7 - Adds host_name parameter to attach_volume()
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
1.11 - Adds mode parameter to attach_volume()
to support volume read-only attaching.
1.12 - Adds retype.
1.13 - Adds create_export.
1.14 - Adds reservation parameter to extend_volume().
1.15 - Adds manage_existing and unmanage_only flag to delete_volume.
1.16 - Removes create_export.
1.17 - Add replica option to create_volume, promote_replica and
sync_replica.
1.18 - Adds create_consistencygroup, delete_consistencygroup,
create_cgsnapshot, and delete_cgsnapshot. Also adds
the consistencygroup_id parameter in create_volume.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
super(VolumeAPI, self).__init__()
target = messaging.Target(topic=CONF.volume_topic,
version=self.BASE_RPC_API_VERSION)
self.client = rpc.get_client(target, '1.18')
def create_consistencygroup(self, ctxt, group, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.18')
cctxt.cast(ctxt, 'create_consistencygroup',
group_id=group['id'])
def delete_consistencygroup(self, ctxt, group):
host = utils.extract_host(group['host'])
cctxt = self.client.prepare(server=host, version='1.18')
cctxt.cast(ctxt, 'delete_consistencygroup',
group_id=group['id'])
def create_cgsnapshot(self, ctxt, group, cgsnapshot):
host = utils.extract_host(group['host'])
cctxt = self.client.prepare(server=host, version='1.18')
cctxt.cast(ctxt, 'create_cgsnapshot',
group_id=group['id'],
cgsnapshot_id=cgsnapshot['id'])
def delete_cgsnapshot(self, ctxt, cgsnapshot, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.18')
cctxt.cast(ctxt, 'delete_cgsnapshot',
cgsnapshot_id=cgsnapshot['id'])
def create_volume(self, ctxt, volume, host,
request_spec, filter_properties,
allow_reschedule=True,
snapshot_id=None, image_id=None,
source_replicaid=None,
source_volid=None,
consistencygroup_id=None):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.4')
request_spec_p = jsonutils.to_primitive(request_spec)
cctxt.cast(ctxt, 'create_volume',
volume_id=volume['id'],
request_spec=request_spec_p,
filter_properties=filter_properties,
allow_reschedule=allow_reschedule,
snapshot_id=snapshot_id,
image_id=image_id,
source_replicaid=source_replicaid,
source_volid=source_volid,
consistencygroup_id=consistencygroup_id)
def delete_volume(self, ctxt, volume, unmanage_only=False):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.15')
cctxt.cast(ctxt, 'delete_volume',
volume_id=volume['id'],
unmanage_only=unmanage_only)
def create_snapshot(self, ctxt, volume, snapshot):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host)
cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'],
snapshot_id=snapshot['id'])
def delete_snapshot(self, ctxt, snapshot, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host)
cctxt.cast(ctxt, 'delete_snapshot', snapshot_id=snapshot['id'])
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.11')
return cctxt.call(ctxt, 'attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
host_name=host_name,
mountpoint=mountpoint,
mode=mode)
def detach_volume(self, ctxt, volume, attachment_id):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'],
attachment_id=attachment_id)
def copy_volume_to_image(self, ctxt, volume, image_meta):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.3')
cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
image_meta=image_meta)
def initialize_connection(self, ctxt, volume, connector):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'initialize_connection',
volume_id=volume['id'],
connector=connector)
def terminate_connection(self, ctxt, volume, connector, force=False):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
connector=connector, force=force)
def publish_service_capabilities(self, ctxt):
cctxt = self.client.prepare(fanout=True, version='1.2')
cctxt.cast(ctxt, 'publish_service_capabilities')
def accept_transfer(self, ctxt, volume, new_user, new_project):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.9')
return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'],
new_user=new_user, new_project=new_project)
def extend_volume(self, ctxt, volume, new_size, reservations):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.14')
cctxt.cast(ctxt, 'extend_volume', volume_id=volume['id'],
new_size=new_size, reservations=reservations)
def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.8')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'migrate_volume', volume_id=volume['id'],
host=host_p, force_host_copy=force_host_copy)
def migrate_volume_completion(self, ctxt, volume, new_volume, error):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.10')
return cctxt.call(ctxt, 'migrate_volume_completion',
volume_id=volume['id'],
new_volume_id=new_volume['id'],
error=error)
def retype(self, ctxt, volume, new_type_id, dest_host,
migration_policy='never', reservations=None):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.12')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'retype', volume_id=volume['id'],
new_type_id=new_type_id, host=host_p,
migration_policy=migration_policy,
reservations=reservations)
def manage_existing(self, ctxt, volume, ref):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.15')
cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)
def promote_replica(self, ctxt, volume):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.17')
cctxt.cast(ctxt, 'promote_replica', volume_id=volume['id'])
def reenable_replication(self, ctxt, volume):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.17')
cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id'])
def update_volume_metadata(self, ctxt, volume, metadata, delete):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.18')
cctxt.cast(ctxt, 'update_volume_metadata', volume_id=volume['id'],
metadata=metadata, delete=delete)
def delete_volume_metadata(self, ctxt, volume, keys):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.18')
cctxt.cast(ctxt, 'delete_volume_metadata', volume_id=volume['id'],
keys=keys)
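# Usage sketch (illustrative, not part of the original module): callers hold a
# request context plus volume/snapshot dicts and delegate to this client, e.g.
#
#   volume_rpcapi = VolumeAPI()
#   volume_rpcapi.create_snapshot(ctxt, volume, snapshot)                # async cast
#   conn = volume_rpcapi.initialize_connection(ctxt, volume, connector)  # blocking call
#
# cast() methods return immediately, while call() methods block until the
# volume manager on the target host replies.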
|
|
# Copyright 2020 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows the description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
import numpy as np
import tensorflow as tf
N_DIGITS = 10 # Number of digits.
X_FEATURE = 'x' # Name of the input feature.
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--tf-data-dir',
type=str,
default='/tmp/data/',
help='GCS path or local path of training data.')
parser.add_argument('--tf-model-dir',
type=str,
help='GCS path or local directory.')
parser.add_argument('--tf-export-dir',
type=str,
default='mnist/',
help='GCS path or local directory to export model')
parser.add_argument('--tf-model-type',
type=str,
default='CNN',
help='Tensorflow model type for training.')
parser.add_argument('--tf-train-steps',
type=int,
default=200,
help='The number of training steps to perform.')
parser.add_argument('--tf-batch-size',
type=int,
default=100,
help='The batch size to use during training.')
parser.add_argument('--tf-learning-rate',
type=float,
default=0.01,
help='Learning rate for training.')
args = parser.parse_known_args()[0]
return args
def conv_model(features, labels, mode, params):
"""2-layer convolution model."""
# Reshape feature to a 4d tensor with the 2nd and 3rd dimensions being
# image width and height, and the final dimension being the number of
# color channels.
feature = tf.reshape(features[X_FEATURE], [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = tf.layers.conv2d(
feature,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
h_pool1 = tf.layers.max_pooling2d(
h_conv1, pool_size=2, strides=2, padding='same')
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = tf.layers.conv2d(
h_pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
h_pool2 = tf.layers.max_pooling2d(
h_conv2, pool_size=2, strides=2, padding='same')
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)
h_fc1 = tf.layers.dropout(
h_fc1,
rate=0.5,
training=(mode == tf.estimator.ModeKeys.TRAIN))
# Compute logits (1 per class) and compute loss.
logits = tf.layers.dense(h_fc1, N_DIGITS, activation=None)
predict = tf.nn.softmax(logits)
classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(
mode, predictions=predictions,
export_outputs={
            'classes': tf.estimator.export.PredictOutput(
{"predictions": predict, "classes": classes})})
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=params["learning_rate"])
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)

def cnn_serving_input_receiver_fn():
inputs = {X_FEATURE: tf.placeholder(tf.float32, [None, 28, 28])}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)

def linear_serving_input_receiver_fn():
inputs = {X_FEATURE: tf.placeholder(tf.float32, [None, 784])}  # batch of flattened 28x28 images
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
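# A hedged aside (not in the original file): these receiver functions define
# the serving signature of the exported SavedModel. For the CNN export, a
# client feeds a float tensor named 'x' of shape [None, 28, 28] and receives
# the 'classes' and 'predictions' outputs declared in conv_model's
# PredictOutput. A local smoke test against the timestamped export directory
# might look like the following (tf.contrib.predictor is TF 1.x only, and
# 'exported_model_dir' is a placeholder path):
#
#   predict_fn = tf.contrib.predictor.from_saved_model(exported_model_dir)
#   result = predict_fn({'x': np.zeros((1, 28, 28), dtype=np.float32)})
#   print(result['classes'], result['predictions'])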

def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
args = parse_arguments()
tf_config = os.environ.get('TF_CONFIG', '{}')
tf.logging.info("TF_CONFIG %s", tf_config)
tf_config_json = json.loads(tf_config)
cluster = tf_config_json.get('cluster')
job_name = tf_config_json.get('task', {}).get('type')
task_index = tf_config_json.get('task', {}).get('index')
tf.logging.info("cluster=%s job_name=%s task_index=%s", cluster, job_name,
task_index)
is_chief = False
if not job_name or job_name.lower() in ["chief", "master"]:
is_chief = True
tf.logging.info("Will export model")
else:
tf.logging.info("Will not export model")
# Download and load MNIST dataset.
mnist = tf.contrib.learn.datasets.DATASETS['mnist'](args.tf_data_dir)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: mnist.train.images},
y=mnist.train.labels.astype(np.int32),
batch_size=args.tf_batch_size,
num_epochs=None,
shuffle=True)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: mnist.test.images},
      y=mnist.test.labels.astype(np.int32),
num_epochs=1,
shuffle=False)
training_config = tf.estimator.RunConfig(
model_dir=args.tf_model_dir, save_summary_steps=100, save_checkpoints_steps=1000)
if args.tf_model_type == "LINEAR":
# Linear classifier.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=mnist.train.images.shape[1:])]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns, n_classes=N_DIGITS,
model_dir=args.tf_model_dir, config=training_config)
    # Use the linear serving signature so the export matches the flattened
    # 784-feature input expected by the linear classifier.
    serving_fn = linear_serving_input_receiver_fn
    export_final = tf.estimator.FinalExporter(
        args.tf_export_dir, serving_input_receiver_fn=linear_serving_input_receiver_fn)
elif args.tf_model_type == "CNN":
# Convolutional network
model_params = {"learning_rate": args.tf_learning_rate}
classifier = tf.estimator.Estimator(
model_fn=conv_model, model_dir=args.tf_model_dir,
config=training_config, params=model_params)
serving_fn = cnn_serving_input_receiver_fn
export_final = tf.estimator.FinalExporter(
args.tf_export_dir, serving_input_receiver_fn=cnn_serving_input_receiver_fn)
else:
print("No such model type: %s" % args.tf_model_type)
sys.exit(1)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=args.tf_train_steps)
eval_spec = tf.estimator.EvalSpec(input_fn=test_input_fn,
steps=1,
exporters=export_final,
throttle_secs=1,
start_delay_secs=1)
print("Train and evaluate")
tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
print("Training done")
if is_chief:
print("Export saved model")
classifier.export_savedmodel(
args.tf_export_dir, serving_input_receiver_fn=serving_fn)
print("Done exporting the model")

if __name__ == '__main__':
tf.app.run()
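# Example invocation (a sketch only; the script name and paths below are
# placeholders, not taken from this file). Every flag shown is defined in
# parse_arguments():
#
#   python mnist.py --tf-data-dir=/tmp/data --tf-model-dir=/tmp/model \
#       --tf-export-dir=/tmp/export --tf-model-type=CNN \
#       --tf-train-steps=200 --tf-batch-size=100 --tf-learning-rate=0.01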
|